# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""A module for baseline-based data diagnosis."""
from typing import Callable
from pathlib import Path
import json

import pandas as pd
import numpy as np

from superbench.common.utils import logger
from superbench.analyzer.diagnosis_rule_op import RuleOp, DiagnosisRuleType
from superbench.analyzer import file_handler
from superbench.analyzer import RuleBase
from superbench.analyzer import data_analysis


class DataDiagnosis(RuleBase):
    """The DataDiagnosis class to do the baseline-based data diagnosis."""
    def __init__(self):
        """Init function."""
        super().__init__()

    def _check_and_format_rules(self, rule, name):
        """Check the rule of the metric whether the formart is valid.

        Args:
            rule (dict): the rule
            name (str): the rule name

        Returns:
            dict: the rule for the metric
        """
        # check if rule is supported
        super()._check_and_format_rules(rule, name)
        if 'function' not in rule:
            logger.log_and_raise(exception=Exception, msg='{} lack of function'.format(name))
        if not isinstance(DiagnosisRuleType(rule['function']), DiagnosisRuleType):
            logger.log_and_raise(exception=Exception, msg='{} invalid function name'.format(name))
        # check rule format
        if 'criteria' not in rule:
            logger.log_and_raise(exception=Exception, msg='{} lack of criteria'.format(name))
        if not isinstance(eval(rule['criteria']), Callable):
            logger.log_and_raise(exception=Exception, msg='invalid criteria format')
        if rule['function'] != 'multi_rules':
            if 'metrics' not in rule:
                logger.log_and_raise(exception=Exception, msg='{} lack of metrics'.format(name))
        if 'store' in rule and not isinstance(rule['store'], bool):
            logger.log_and_raise(exception=Exception, msg='{} store must be bool type'.format(name))
        return rule
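
    # A minimal sketch of a rule that passes the checks above (the names and
    # values are hypothetical and only illustrate the expected schema):
    #
    #     rule = {
    #         'categories': 'KernelLaunch',
    #         'function': 'variance',
    #         'criteria': 'lambda x: x < -0.05',
    #         'metrics': ['kernel-launch/event_overhead'],
    #     }
    #     rule = self._check_and_format_rules(rule, 'kernel_launch_rule')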

    def _get_baseline_of_metric(self, baseline, metric):
        """Get the baseline value of the metric.

        Args:
            baseline (dict): baseline defined in baseline file
            metric (str): the full name of the metric

        Returns:
            numeric: the baseline value of the metric
        """
        if metric in baseline:
            return baseline[metric]
        elif 'return_code' in metric:
            return 0
        else:
            short = metric
            # exclude rank info, for example, '.*:\d+'->'.*'
            if ':' in metric:
                short = metric.rsplit(':', 1)[0]
            else:
                short = metric.split('/')[0]
            if short in baseline:
                return baseline[short]
            # baseline not defined
            else:
                logger.warning('DataDiagnosis: get baseline - {} baseline not found'.format(metric))
                return -1
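
    # For example (hypothetical metric names): 'kernel-launch/event_overhead:0'
    # is first looked up by its full name and then by the rank-stripped short
    # name 'kernel-launch/event_overhead'; metrics containing 'return_code'
    # default to a baseline of 0, and metrics with no baseline at all return -1.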

    def __get_metrics_and_baseline(self, rule, benchmark_rules, baseline):
        """Get metrics with baseline in the rule.

        Parse the metric regex in the rule, store each metric with its baseline
        in _sb_rules[rule]['metrics'], and add the metric to _enable_metrics.

        Args:
            rule (str): the name of the rule
            benchmark_rules (dict): the dict of rules
            baseline (dict): the dict of baseline of metrics
        """
        if 'function' in self._sb_rules[rule] and self._sb_rules[rule]['function'] == 'multi_rules':
            return
        self._get_metrics(rule, benchmark_rules)
        for metric in self._sb_rules[rule]['metrics']:
            self._sb_rules[rule]['metrics'][metric] = self._get_baseline_of_metric(baseline, metric)
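
    # After this step, _sb_rules[rule]['metrics'] maps each matched metric to
    # its baseline value, e.g. (hypothetical names and numbers):
    #
    #     {'kernel-launch/event_overhead:0': 0.0055, 'kernel-launch/event_overhead:1': 0.0055}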

    def _parse_rules_and_baseline(self, rules, baseline):
        """Parse and merge rules and baseline read from file.

        Args:
            rules (dict): rules from rule yaml file
            baseline (dict): baseline of metrics from baseline json file

        Returns:
            bool: True if the criteria for all rules are parsed successfully, otherwise False.
        """
        try:
            if not rules:
                logger.error('DataDiagnosis: get criteria failed')
                return False
            self._sb_rules = {}
            self._enable_metrics = set()
            benchmark_rules = rules['superbench']['rules']
            self._raw_rules = benchmark_rules
            for rule in benchmark_rules:
                benchmark_rules[rule] = self._check_and_format_rules(benchmark_rules[rule], rule)
                self._sb_rules[rule] = {}
                self._sb_rules[rule]['name'] = rule
                self._sb_rules[rule]['function'] = benchmark_rules[rule]['function']
                self._sb_rules[rule]['store'] = benchmark_rules[rule].get('store') is True
                self._sb_rules[rule]['criteria'] = benchmark_rules[rule]['criteria']
                self._sb_rules[rule]['categories'] = benchmark_rules[rule]['categories']
                self._sb_rules[rule]['metrics'] = {}
                self.__get_metrics_and_baseline(rule, benchmark_rules, baseline)
            self._enable_metrics = sorted(list(self._enable_metrics))
        except Exception as e:
            logger.error('DataDiagnosis: get criteria failed - {}'.format(str(e)))
            return False

        return True
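
    # A sketch of the expected inputs (hypothetical content): `rules` mirrors
    # the rule yaml file and `baseline` the baseline json file, e.g.
    #
    #     rules = {'superbench': {'rules': {'kernel_launch_rule': {...}}}}
    #     baseline = {'kernel-launch/event_overhead': 0.0055}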

    def _run_diagnosis_rules_for_single_node(self, node):
        """Use rules to diagnosis single node data.

        Use the rules defined in rule_file to diagnose the raw data of each node,
        if the node violate any rule, label as defective node and save
        the 'Category', 'Defective Details' and data summary of defective node.

        Args:
            node (str): the node to do the diagosis

        Returns:
            details_row (list): None if the node is not labeled as defective,
                otherwise details of ['Category', 'Defective Details']
            summary_data_row (dict): None if the node is not labeled as defective,
                otherwise data summary of the metrics
        """
        data_row = self._raw_data_df.loc[node]
        issue_label = False
        details = []
        categories = set()
        violation = {}
        summary_data_row = pd.Series(index=self._enable_metrics, name=node, dtype=float)
        # Check each rule
        for rule in self._sb_rules:
            # Get rule op function and run the rule
            function_name = self._sb_rules[rule]['function']
            rule_op = RuleOp.get_rule_func(DiagnosisRuleType(function_name))
            violated_num = 0
            if rule_op == RuleOp.multi_rules:
                violated_num = rule_op(self._sb_rules[rule], details, categories, violation)
            elif rule_op == RuleOp.failure_check:
                violated_num = rule_op(
                    data_row, self._sb_rules[rule], summary_data_row, details, categories, self._raw_rules[rule]
                )
            else:
                violated_num = rule_op(data_row, self._sb_rules[rule], summary_data_row, details, categories)
            # label the node as a defective one
            if self._sb_rules[rule]['store']:
                violation[rule] = violated_num
            elif violated_num:
                issue_label = True
        if issue_label:
            # Add category information
            general_cat_str = ','.join(sorted(categories))
            details_cat_str = ','.join(sorted(details))
            details_row = [general_cat_str, details_cat_str]
            return details_row, summary_data_row

        return None, None
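
    # For a defective node, `details_row` is a two-element list such as
    # ['KernelLaunch', 'kernel-launch/event_overhead:0(...)'] (hypothetical
    # values), and `summary_data_row` holds the node's values for the metrics
    # checked by the rules.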

    def run_diagnosis_rules(self, rules, baseline):
        """Rule-based data diagnosis for multiple nodes' raw data.

        Use the rules defined in rules to diagnose the raw data of each node;
        if a node violates any rule, label it as a defective node and save
        the 'Category', 'Defective Details' and processed data of the defective node.

        Args:
            rules (dict): rules from rule yaml file
            baseline (dict): baseline of metrics from baseline json file

        Returns:
            data_not_accept_df (DataFrame): defective nodes' detailed information
            label_df (DataFrame): labels for all nodes
        """
        try:
            summary_columns = ['Category', 'Defective Details']
            data_not_accept_df = pd.DataFrame(columns=summary_columns)
            summary_details_df = pd.DataFrame()
            label_df = pd.DataFrame(columns=['label'])
            if not self._parse_rules_and_baseline(rules, baseline):
                return data_not_accept_df, label_df
            # run diagnosis rules for each node
            for node in self._raw_data_df.index:
                details_row, summary_data_row = self._run_diagnosis_rules_for_single_node(node)
                if details_row:
                    data_not_accept_df.loc[node] = details_row
                    summary_details_df = pd.concat(
                        [summary_details_df,
                         pd.DataFrame([summary_data_row.to_dict()], index=[summary_data_row.name])]
                    )
                    label_df.loc[node] = 1
                else:
                    label_df.loc[node] = 0
            # combine details for defective nodes
            if len(data_not_accept_df) != 0:
                data_not_accept_df = data_not_accept_df.join(summary_details_df)
                data_not_accept_df = data_not_accept_df.sort_values(by=summary_columns, ascending=False)

        except Exception as e:
            logger.error('DataDiagnosis: run diagnosis rules failed, message: {}'.format(str(e)))
        return data_not_accept_df, label_df
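
    # A usage sketch (hypothetical paths), mirroring what run() does below:
    #
    #     diag = DataDiagnosis()
    #     rules = diag._preprocess('raw-data.jsonl', 'rules.yaml')
    #     baseline = file_handler.read_baseline('baseline.json')
    #     data_not_accept_df, label_df = diag.run_diagnosis_rules(rules, baseline)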

    def output_all_nodes_results(self, raw_data_df, data_not_accept_df):
        """Output diagnosis results of all nodes.

        Args:
            raw_data_df (DataFrame): raw data
            data_not_accept_df (DataFrame): defective nodes' detailed information

        Returns:
            DataFrame: all nodes' detailed information including ['Accept', 'Number Of Issues',
            'Category', 'Defective Details']
        """
        append_columns = ['Accept', 'Number Of Issues', 'Category', 'Defective Details']
        all_data_df = raw_data_df.astype('float64')
        if data_not_accept_df.shape[0] == 0:
            all_data_df['Accept'] = True
            all_data_df['Number Of Issues'] = 0
            all_data_df['Category'] = None
            all_data_df['Defective Details'] = None
        elif data_not_accept_df.shape[0] > 0:
            data_not_accept_df['Accept'] = False
            data_not_accept_df['Number Of Issues'] = data_not_accept_df['Defective Details'].map(
                lambda x: len(x.split(','))
            )
            for col in append_columns:
                if col not in data_not_accept_df:
                    logger.warning(
                        'DataDiagnosis: output_all_nodes_results - column {} not found in data_not_accept_df.'.
                        format(col)
                    )
                    all_data_df[col] = None
                else:
                    all_data_df = all_data_df.merge(
                        data_not_accept_df[[col]], left_index=True, right_index=True, how='left'
                    )
            all_data_df['Accept'] = all_data_df['Accept'].replace(np.nan, True)
            all_data_df['Number Of Issues'] = all_data_df['Number Of Issues'].replace(np.nan, 0)
            all_data_df['Number Of Issues'] = all_data_df['Number Of Issues'].astype(int)

        all_data_df = all_data_df.replace(np.nan, '')

        return all_data_df
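
    # The returned frame keeps every raw metric column and appends the four
    # diagnosis columns per node, e.g. (hypothetical values): Accept=False,
    # Number Of Issues=2, Category='KernelLaunch', Defective Details='...'.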

    def output_diagnosis_in_excel(self, raw_data_df, data_not_accept_df, output_path, rules):
        """Output the raw_data_df and data_not_accept_df results into excel file.

        Args:
            raw_data_df (DataFrame): raw data
            data_not_accept_df (DataFrame): defective nodes' detailed information
            output_path (str): the path of output excel file
            rules (dict): the rules of DataDiagnosis
        """
        try:
            writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
            # Check whether writer is valid
            if not isinstance(writer, pd.ExcelWriter):
                logger.error('DataDiagnosis: excel_data_output - invalid file path.')
                return
            file_handler.output_excel_raw_data(writer, raw_data_df, 'Raw Data')
            file_handler.output_excel_data_not_accept(writer, data_not_accept_df, rules)
            writer.save()
        except Exception as e:
            logger.error('DataDiagnosis: excel_data_output - {}'.format(str(e)))

    def output_diagnosis_in_jsonl(self, data_not_accept_df, output_path):
        """Output data_not_accept_df into jsonl file.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            output_path (str): the path of output jsonl file
        """
        # validate the input before converting it
        if not isinstance(data_not_accept_df, pd.DataFrame):
            logger.warning('DataDiagnosis: output json data - data_not_accept_df is not DataFrame.')
            return
        if data_not_accept_df.empty:
            logger.warning('DataDiagnosis: output json data - data_not_accept_df is empty.')
            return
        p = Path(output_path)
        try:
            data_not_accept_json = data_not_accept_df.to_json(orient='index')
            data_not_accept = json.loads(data_not_accept_json)
            with p.open('w') as f:
                for node in data_not_accept:
                    line = data_not_accept[node]
                    line['Index'] = node
                    json_str = json.dumps(line)
                    f.write(json_str + '\n')
        except Exception as e:
            logger.error('DataDiagnosis: output json data failed, msg: {}'.format(str(e)))
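
    # Each output line is one node's record, e.g. (hypothetical content):
    #
    #     {"Category": "KernelLaunch", "Defective Details": "...", "Index": "node-0"}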

    def output_diagnosis_in_json(self, data_not_accept_df, output_path):
        """Output data_not_accept_df into json file.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            output_path (str): the path of output json file
        """
        data_not_accept_df['Index'] = data_not_accept_df.index
        data_not_accept_df = data_not_accept_df.rename(
            columns={
                'Defective Details': 'diagnosis/issue_details',
                'Category': 'diagnosis/category',
                'Number Of Issues': 'diagnosis/issue_num',
                'Accept': 'diagnosis/accept'
            }
        )
        data_not_accept_json = data_not_accept_df.to_json(orient='records')
        data_not_accept = json.loads(data_not_accept_json)
        p = Path(output_path)
        with p.open('w') as f:
            json.dump(data_not_accept, f, indent=4)
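
    # Each record in the output array uses the renamed keys, e.g. (hypothetical
    # content):
    #
    #     {"diagnosis/category": "KernelLaunch", "diagnosis/issue_details": "...", "Index": "node-0"}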

    def generate_md_lines(self, data_not_accept_df, rules, round):
        """Convert DataFrame into markdown lines.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            rules (dict): the rules of DataDiagnosis
            round (int): the number of decimal digits

        Returns:
            list: lines in markdown format
        """
        data_not_accept_df['machine'] = data_not_accept_df.index
        header = data_not_accept_df.columns.tolist()
        header = header[-1:] + header[:-1]
        data_not_accept_df = data_not_accept_df[header]
        # format precision of values to n decimal digits
        for rule in rules:
            for metric in rules[rule]['metrics']:
                if rules[rule]['function'] == 'variance':
                    if round and isinstance(round, int):
                        data_not_accept_df[metric] = data_not_accept_df[metric].map(
                            lambda x: x * 100, na_action='ignore'
                        )
                        data_not_accept_df = data_analysis.round_significant_decimal_places(
                            data_not_accept_df, round, [metric]
                        )
                    data_not_accept_df[metric] = data_not_accept_df[metric].map(
                        lambda x: '{}%'.format(x), na_action='ignore'
                    )
                elif rules[rule]['function'] == 'value':
                    if round and isinstance(round, int):
                        data_not_accept_df = data_analysis.round_significant_decimal_places(
                            data_not_accept_df, round, [metric]
                        )
        lines = file_handler.generate_md_table(data_not_accept_df, header)
        return lines
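
    # The generated lines render a markdown table roughly like (hypothetical
    # values; the exact layout comes from file_handler.generate_md_table):
    #
    #     | machine | Category | Defective Details | kernel-launch/event_overhead:0 |
    #     | node-0  | KernelLaunch | ... | 24.5% |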

    def run(
        self, raw_data_file, rule_file, baseline_file, output_dir, output_format='excel', output_all=False, round=2
    ):
        """Run the data diagnosis and output the results.

        Args:
            raw_data_file (str): the path of raw data jsonl file
            rule_file (str): the path of rule yaml file
            baseline_file (str): the path of baseline json file
            output_dir (str): the directory of output file
            output_format (str): the format of the output, 'excel', 'json', 'jsonl', 'md' or 'html'
            output_all (bool): whether to output diagnosis results for all nodes
            round (int): the number of decimal digits
        """
        try:
            rules = self._preprocess(raw_data_file, rule_file)
            # read baseline
            baseline = file_handler.read_baseline(baseline_file)
            logger.info('DataDiagnosis: Begin to process {} nodes'.format(len(self._raw_data_df)))
            output_df, label_df = self.run_diagnosis_rules(rules, baseline)
            logger.info('DataDiagnosis: Process finished')
            output_path = str(Path(output_dir) / f'diagnosis_summary.{output_format}')
            # generate all nodes' info
            if output_all:
                output_df = self.output_all_nodes_results(self._raw_data_df, output_df)
            # output according to format
            if output_format == 'excel':
                output_path = str(Path(output_dir) / 'diagnosis_summary.xlsx')
                self.output_diagnosis_in_excel(self._raw_data_df, output_df, output_path, self._sb_rules)
            elif output_format == 'json':
                self.output_diagnosis_in_json(output_df, output_path)
            elif output_format == 'jsonl':
                self.output_diagnosis_in_jsonl(output_df, output_path)
            elif output_format == 'md' or output_format == 'html':
                lines = self.generate_md_lines(output_df, self._sb_rules, round)
                if output_format == 'md':
                    file_handler.output_lines_in_md(lines, output_path)
                else:
                    file_handler.output_lines_in_html(lines, output_path)
            else:
                logger.error('DataDiagnosis: output failed - unsupported output format')
            logger.info('DataDiagnosis: Output results to {}'.format(output_path))
        except Exception as e:
            logger.error('DataDiagnosis: run failed - {}'.format(str(e)))
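
# A minimal end-to-end sketch (hypothetical paths; assumes DataDiagnosis is
# re-exported from superbench.analyzer as the other analyzer classes are):
#
#     from superbench.analyzer import DataDiagnosis
#
#     DataDiagnosis().run(
#         raw_data_file='outputs/results-summary.jsonl',
#         rule_file='rules.yaml',
#         baseline_file='baseline.json',
#         output_dir='outputs',
#         output_format='excel',
#     )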