# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the docker-benchmark base class."""

from abc import abstractmethod

from superbench.common.utils import logger, run_command
from superbench.benchmarks import BenchmarkType, ReturnCode
from superbench.benchmarks.base import Benchmark

class DockerBenchmark(Benchmark):
    """The base class of benchmarks packaged in docker container.

    Subclasses are expected to set `_image_uri` and `_container_name`, fill
    `_commands` with the docker invocations to run, and implement
    `_process_raw_result()` to parse each command's output.
    """
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        super().__init__(name, parameters)
        self._benchmark_type = BenchmarkType.DOCKER

        # Command lines to launch the docker image and run the benchmarks inside docker.
        self._commands = list()

        # Image uri of the current docker-benchmark. Must be set by the subclass
        # before _preprocess() runs, otherwise the benchmark fails fast.
        self._image_uri = None

        # Container name of the current docker-benchmark. Must be set by the
        # subclass before _preprocess() runs.
        self._container_name = None

    '''
    # If need to add new arguments, super().add_parser_arguments() must be called.
    def add_parser_arguments(self):
        """Add the specified arguments."""
        super().add_parser_arguments()
    '''

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Validates that the subclass configured the image uri and container
        name, then pulls the docker image so the run phase does not pay the
        download cost.

        Return:
            True if _preprocess() succeed.
        """
        if not super()._preprocess():
            return False

        if self._image_uri is None:
            self._result.set_return_code(ReturnCode.DOCKERBENCHMARK_IMAGE_NOT_SET)
            logger.error('The image uri is not set - benchmark: {}.'.format(self._name))
            return False

        if self._container_name is None:
            self._result.set_return_code(ReturnCode.DOCKERBENCHMARK_CONTAINER_NOT_SET)
            logger.error('The container name is not set - benchmark: {}.'.format(self._name))
            return False

        # Pull up front so failures are surfaced here with a dedicated
        # return code instead of mid-benchmark.
        output = run_command('docker pull --quiet {}'.format(self._image_uri))
        if output.returncode != 0:
            self._result.set_return_code(ReturnCode.DOCKERBENCHMARK_IMAGE_PULL_FAILURE)
            logger.error(
                'DockerBenchmark pull image failed - benchmark: {}, error message: {}.'.format(
                    self._name, output.stdout
                )
            )
            return False

        return True

    def _postprocess(self):
        """Postprocess/cleanup operations after the benchmarking.

        Stops and removes the container, then removes the pulled image.
        Cleanup is best-effort: command failures are ignored and True is
        always returned.

        Return:
            True if _postprocess() succeed.
        """
        # Give the container up to 20 seconds to stop gracefully before removal.
        rm_containers = 'docker stop --time 20 {container} && docker rm {container}'.format(
            container=self._container_name
        )
        run_command(rm_containers)

        rm_image = 'docker rmi {}'.format(self._image_uri)
        run_command(rm_image)

        return True

    def _benchmark(self):
        """Implementation for benchmarking.

        Runs every command in `_commands` in order and hands each command's
        stdout to `_process_raw_result()`. Stops at the first command that
        fails or whose output cannot be parsed.

        Return:
            True if run benchmark successfully.
        """
        for cmd_idx, command in enumerate(self._commands):
            logger.info(
                'Execute command - round: {}, benchmark: {}, command: {}.'.format(
                    self._curr_run_index, self._name, command
                )
            )
            output = run_command(command)
            if output.returncode != 0:
                self._result.set_return_code(ReturnCode.DOCKERBENCHMARK_EXECUTION_FAILURE)
                logger.error(
                    'DockerBenchmark execution failed - round: {}, benchmark: {}, error message: {}.'.format(
                        self._curr_run_index, self._name, output.stdout
                    )
                )
                return False
            else:
                if not self._process_raw_result(cmd_idx, output.stdout):
                    self._result.set_return_code(ReturnCode.DOCKERBENCHMARK_RESULT_PARSING_FAILURE)
                    return False

        return True

    @abstractmethod
    def _process_raw_result(self, cmd_idx, raw_output):
        """Function to process raw results and save the summarized results.

          self._result.add_raw_data() and self._result.add_result() need to be called to save the results.

        Args:
            cmd_idx (int): the index of command corresponding with the raw_output.
            raw_output (str): raw output string of the docker-benchmark.

        Return:
            True if the raw output string is valid and result can be extracted.
        """
        pass

    def print_env_info(self):
        """Print environments or dependencies information."""
        # TODO: will implement it when add real benchmarks in the future.
        pass