# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""A module for baseline-based data diagnosis."""
from typing import Callable
from pathlib import Path
import json

import pandas as pd
import numpy as np

from superbench.common.utils import logger
from superbench.analyzer.diagnosis_rule_op import RuleOp, DiagnosisRuleType
from superbench.analyzer import file_handler
from superbench.analyzer import RuleBase
from superbench.analyzer import data_analysis


class DataDiagnosis(RuleBase):
    """The DataDiagnosis class for baseline-based data diagnosis."""
    def __init__(self):
        """Init function."""
        super().__init__()
        self.na = 'N/A'

    def _check_and_format_rules(self, rule, name):
        """Check whether the format of the metric rule is valid.

        Args:
            rule (dict): the rule
            name (str): the rule name

        Returns:
            dict: the rule for the metric
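
        Example:
            A minimal rule that passes the checks below (hypothetical values;
            'variance' is assumed to be a valid DiagnosisRuleType member):

                rule = {
                    'categories': 'KernelLaunch',
                    'function': 'variance',
                    'criteria': 'lambda x: x > 0.05',
                    'metrics': ['kernel-launch/event_overhead'],
                }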
        """
        # check if rule is supported
        super()._check_and_format_rules(rule, name)
        if 'store' not in rule:
            if 'function' not in rule:
                logger.log_and_raise(exception=Exception, msg='{} lack of function'.format(name))
            try:
                DiagnosisRuleType(rule['function'])
            except ValueError:
                logger.log_and_raise(exception=Exception, msg='{} invalid function name'.format(name))
            # check rule format
            if 'criteria' not in rule:
                logger.log_and_raise(exception=Exception, msg='{} lack of criteria'.format(name))
            if not isinstance(eval(rule['criteria']), Callable):
                logger.log_and_raise(exception=Exception, msg='invalid criteria format')
            if rule['function'] != 'multi_rules':
                if 'metrics' not in rule:
                    logger.log_and_raise(exception=Exception, msg='{} lack of metrics'.format(name))
        if 'store' in rule and not isinstance(rule['store'], bool):
            logger.log_and_raise(exception=Exception, msg='{} store must be bool type'.format(name))
        return rule

    def _get_baseline_of_metric(self, baseline, metric):
        """Get the baseline value of the metric.

        Args:
            baseline (dict): baseline defined in baseline file
            metric (str): the full name of the metric

        Returns:
            numeric: the baseline value of the metric
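
        Example:
            With a hypothetical baseline {'kernel-launch/event_overhead': 0.0060},
            looking up 'kernel-launch/event_overhead:0' first misses the full
            name, then drops the rank suffix and returns 0.0060; a metric with
            no baseline defined returns None.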
        """
        if metric in baseline:
            return baseline[metric]
        else:
            short = metric
            # exclude rank info, for example, '.*:\d+'->'.*'
            if ':' in metric:
                short = metric.rsplit(':', 1)[0]
            else:
                short = metric.split('/')[0]
            if short in baseline:
                return baseline[short]
            # baseline not defined
            else:
                return None

    def __get_metrics_and_baseline(self, rule, benchmark_rules, baseline):
        """Get metrics with baseline in the rule.

        Parse the metric regex in the rule, store each metric's baseline in
        _sb_rules[rule]['metrics'], and add each metric to _enable_metrics.

        Args:
            rule (str): the name of the rule
            benchmark_rules (dict): the dict of rules
            baseline (dict): the dict of baseline of metrics
        """
        if 'function' in self._sb_rules[rule] and self._sb_rules[rule]['function'] == 'multi_rules':
            return
        self._get_metrics(rule, benchmark_rules)
        for metric in self._sb_rules[rule]['metrics']:
            self._sb_rules[rule]['metrics'][metric] = self._get_baseline_of_metric(baseline, metric)

    def _parse_rules_and_baseline(self, rules, baseline):
        """Parse and merge rules and baseline read from file.

        Args:
            rules (dict): rules from rule yaml file
            baseline (dict): baseline of metrics from baseline json file

        Returns:
            bool: True if the criteria for all rules are successfully parsed, otherwise False.
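
        Example:
            A minimal shape of the inputs (names and values are hypothetical):

                rules = {
                    'superbench': {
                        'rules': {
                            'rule0': {
                                'categories': 'KernelLaunch',
                                'function': 'variance',
                                'criteria': 'lambda x: x > 0.05',
                                'metrics': ['kernel-launch/event_overhead'],
                            }
                        }
                    }
                }
                baseline = {'kernel-launch/event_overhead': 0.0060}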
        """
        try:
            if not rules:
                logger.log_and_raise(exception=Exception, msg='DataDiagnosis: get criteria failed')
            self._sb_rules = {}
            self._enable_metrics = set()
            benchmark_rules = rules['superbench']['rules']
            self._raw_rules = benchmark_rules
            for rule in benchmark_rules:
                benchmark_rules[rule] = self._check_and_format_rules(benchmark_rules[rule], rule)
                self._sb_rules[rule] = {}
                self._sb_rules[rule]['name'] = rule
                if 'function' in benchmark_rules[rule]:
                    self._sb_rules[rule]['function'] = benchmark_rules[rule]['function']
                self._sb_rules[rule]['store'] = benchmark_rules[rule].get('store') is True
                if 'criteria' in benchmark_rules[rule]:
                    self._sb_rules[rule]['criteria'] = benchmark_rules[rule]['criteria']
                self._sb_rules[rule]['categories'] = benchmark_rules[rule]['categories']
                self._sb_rules[rule]['metrics'] = {}
                self.__get_metrics_and_baseline(rule, benchmark_rules, baseline)
            self._enable_metrics = sorted(list(self._enable_metrics))
        except Exception as e:
            logger.log_and_raise(exception=Exception, msg='DataDiagnosis: get criteria failed - {}'.format(str(e)))

        return True

    def _run_diagnosis_rules_for_single_node(self, node):
        """Use rules to diagnosis single node data.

        Use the rules defined in rule_file to diagnose the raw data of each node,
        if the node violate any rule, label as defective node and save
        the 'Category', 'Defective Details' and data summary of defective node.

        Args:
            node (str): the node to diagnose

        Returns:
            details_row (list): None if the node is not labeled as defective,
                otherwise details of ['Category', 'Defective Details']
            summary_data_row (dict): None if the node is not labeled as defective,
                otherwise data summary of the metrics
        """
        data_row = self._raw_data_df.loc[node]
        issue_label = False
        details = []
        categories = set()
        store_values = {}
        summary_data_row = pd.Series(index=self._enable_metrics, name=node, dtype=float)
        # Check each rule
        for rule in self._sb_rules:
            # if a rule has store=True and no criteria, store the values of its metrics
            if self._sb_rules[rule]['store'] and 'criteria' not in self._sb_rules[rule]:
                store_values[rule] = {}
                for metric in self._sb_rules[rule]['metrics']:
                    store_values[rule][metric] = data_row[metric]
                continue
            # Get rule op function and run the rule
            function_name = self._sb_rules[rule]['function']
            rule_op = RuleOp.get_rule_func(DiagnosisRuleType(function_name))
            violated_num = 0
            if rule_op == RuleOp.multi_rules:
                violated_num = rule_op(self._sb_rules[rule], details, categories, store_values)
            elif rule_op == RuleOp.failure_check:
                violated_num = rule_op(
                    data_row, self._sb_rules[rule], summary_data_row, details, categories, self._raw_rules[rule]
                )
            else:
                violated_num = rule_op(data_row, self._sb_rules[rule], summary_data_row, details, categories)
            # label the node as defective
            if self._sb_rules[rule]['store']:
                store_values[rule] = violated_num
            elif violated_num:
                issue_label = True
        if issue_label:
            # Add category information
            general_cat_str = ','.join(sorted(categories))
            details_cat_str = ','.join(sorted(details))
            details_row = [general_cat_str, details_cat_str]
            return details_row, summary_data_row

        return None, None

    def run_diagnosis_rules(self, rules, baseline):
        """Rule-based data diagnosis for multiple nodes' raw data.

        Use the rules defined in rules to diagnose the raw data of each node,
        if the node violates any rule, label it as a defective node and save
        the 'Category', 'Defective Details' and processed data of defective node.

        Args:
            rules (dict): rules from rule yaml file
            baseline (dict): baseline of metrics from baseline json file

        Returns:
            data_not_accept_df (DataFrame): defective nodes' detailed information
            label_df (DataFrame): labels for all nodes
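
        Example:
            A minimal sketch (hypothetical paths; assumes self._raw_data_df has
            already been populated, e.g. by _preprocess, and uses the read
            helpers from file_handler):

                rules = file_handler.read_rules('rules.yaml')
                baseline = file_handler.read_baseline('baseline.json')
                data_not_accept_df, label_df = self.run_diagnosis_rules(rules, baseline)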
        """
        summary_columns = ['Category', 'Defective Details']
        data_not_accept_df = pd.DataFrame(columns=summary_columns)
        summary_details_df = pd.DataFrame()
        label_df = pd.DataFrame(columns=['label'])
        if not self._parse_rules_and_baseline(rules, baseline):
            return data_not_accept_df, label_df
        # run diagnosis rules for each node
        for node in self._raw_data_df.index:
            details_row, summary_data_row = self._run_diagnosis_rules_for_single_node(node)
            if details_row:
                data_not_accept_df.loc[node] = details_row
                summary_details_df = pd.concat(
                    [summary_details_df,
                     pd.DataFrame([summary_data_row.to_dict()], index=[summary_data_row.name])]
                )
                label_df.loc[node] = 1
            else:
                label_df.loc[node] = 0
        # combine details for defective nodes
        if len(data_not_accept_df) != 0:
            data_not_accept_df = data_not_accept_df.join(summary_details_df)
            data_not_accept_df = data_not_accept_df.sort_values(by=summary_columns, ascending=False)

        return data_not_accept_df, label_df

    def output_all_nodes_results(self, raw_data_df, data_not_accept_df):
        """Output diagnosis results of all nodes.

        Args:
            raw_data_df (DataFrame): raw data
            data_not_accept_df (DataFrame): defective nodes' detailed information

        Returns:
            DataFrame: all nodes' detailed information including ['Accept', 'Number Of Issues',
            'Category', 'Defective Details']
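
        Example:
            Per the logic below, a healthy node ends up with Accept=True,
            Number Of Issues=0, Category=None and Defective Details=None
            appended to its raw data columns.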
        """
        append_columns = ['Accept', 'Number Of Issues', 'Category', 'Defective Details']
        all_data_df = raw_data_df.astype('float64')

        if data_not_accept_df.shape[0] == 0:
            all_data_df['Accept'] = True
            all_data_df['Number Of Issues'] = 0
            all_data_df['Category'] = None
            all_data_df['Defective Details'] = None

        else:
            data_not_accept_df['Accept'] = False
            data_not_accept_df['Number Of Issues'] = data_not_accept_df['Defective Details'].map(
                lambda x: len(x.split(','))
            )
            for index in range(len(append_columns) - 1, -1, -1):
                if append_columns[index] not in data_not_accept_df:
                    logger.log_and_raise(
                        Exception,
                        msg='DataDiagnosis: output_all_nodes_results - column {} not found in data_not_accept_df.'.
                        format(append_columns[index])
                    )
                else:
                    all_data_df = data_not_accept_df[[
                        append_columns[index]
                    ]].merge(all_data_df, left_index=True, right_index=True, how='right')
            all_data_df['Accept'] = all_data_df['Accept'].replace(np.nan, True)
            all_data_df['Number Of Issues'] = all_data_df['Number Of Issues'].replace(np.nan, 0)
            all_data_df['Number Of Issues'] = all_data_df['Number Of Issues'].astype(int)

        return all_data_df

    def output_diagnosis_in_excel(self, raw_data_df, data_not_accept_df, output_path, rules):
        """Output the raw_data_df and data_not_accept_df results into excel file.

        Args:
            raw_data_df (DataFrame): raw data
            data_not_accept_df (DataFrame): defective nodes' detailed information
            output_path (str): the path of output excel file
            rules (dict): the rules of DataDiagnosis
        """
        try:
            data_not_accept_df = data_not_accept_df.convert_dtypes()
            writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
            # Check whether writer is valid
            if not isinstance(writer, pd.ExcelWriter):
                logger.log_and_raise(exception=IOError, msg='DataDiagnosis: excel_data_output - invalid file path.')
            file_handler.output_excel_raw_data(writer, raw_data_df, 'Raw Data')
            file_handler.output_excel_data_not_accept(writer, data_not_accept_df, rules)
            writer.save()
        except Exception as e:
            logger.log_and_raise(exception=Exception, msg='DataDiagnosis: excel_data_output - {}'.format(str(e)))

    def output_diagnosis_in_jsonl(self, data_not_accept_df, output_path):
        """Output data_not_accept_df into jsonl file.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            output_path (str): the path of output jsonl file
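
        Example:
            Each output line is one defective node's record: the DataFrame
            columns plus an 'index' field holding the node name, e.g. with
            hypothetical values:

                {"Category": "KernelLaunch", "Defective Details": "kernel-launch/event_overhead:0", "index": "node0"}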
        """
        data_not_accept_df = data_not_accept_df.convert_dtypes().astype('object').fillna(self.na)
        p = Path(output_path)
        try:
            if not isinstance(data_not_accept_df, pd.DataFrame):
                logger.log_and_raise(
                    Exception, msg='DataDiagnosis: output json data - data_not_accept_df is not DataFrame.'
                )
            data_not_accept_json = data_not_accept_df.to_json(orient='index')
            data_not_accept = json.loads(data_not_accept_json)
            if data_not_accept_df.empty:
                with p.open('w') as f:
                    pass
                return
            with p.open('w') as f:
                for node in data_not_accept:
                    line = data_not_accept[node]
                    line['index'] = node
                    json_str = json.dumps(line)
                    f.write(json_str + '\n')
        except Exception as e:
            logger.log_and_raise(
                exception=Exception, msg='DataDiagnosis: output json data failed, msg: {}'.format(str(e))
            )

    def output_diagnosis_in_json(self, data_not_accept_df, output_path):
        """Output data_not_accept_df into json file.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            output_path (str): the path of output json file
        """
        data_not_accept_df = data_not_accept_df.convert_dtypes().astype('object').fillna(self.na)
        data_not_accept_df = data_not_accept_df.reset_index()
        data_not_accept_df = data_not_accept_df.rename(
            columns={
                'Defective Details': 'diagnosis/issue_details',
                'Category': 'diagnosis/category',
                'Number Of Issues': 'diagnosis/issue_num',
                'Accept': 'diagnosis/accept'
            }
        )
        data_not_accept_json = data_not_accept_df.to_json(orient='records')
        data_not_accept = json.loads(data_not_accept_json)
        p = Path(output_path)
        with p.open('w') as f:
            json.dump(data_not_accept, f, indent=4)

    def generate_md_lines(self, data_not_accept_df, rules, round):
        """Convert DataFrame into markdown lines.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            rules (dict): the rules of DataDiagnosis
            round (int): the number of decimal digits

        Returns:
            list: lines in markdown format
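
        Example:
            The returned lines form a markdown table (exact layout comes from
            file_handler.generate_md_table); with hypothetical columns the
            header line would resemble:

                | index | Category | Defective Details | kernel-launch/event_overhead |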
        """
        if len(data_not_accept_df) == 0:
            return []
        data_not_accept_df = data_not_accept_df.reset_index()
        header = data_not_accept_df.columns.tolist()
        # format precision of values to n decimal digits
        for rule in rules:
            if 'function' in rules[rule]:
                for metric in rules[rule]['metrics']:
                    if rules[rule]['function'] == 'variance':
                        if round and isinstance(round, int):
                            data_not_accept_df[metric] = data_not_accept_df[metric].map(
                                lambda x: x * 100, na_action='ignore'
                            )
                            data_not_accept_df = data_analysis.round_significant_decimal_places(
                                data_not_accept_df, round, [metric]
                            )
                        data_not_accept_df[metric] = data_not_accept_df[metric].map(
                            lambda x: '{}%'.format(x), na_action='ignore'
                        )
                    elif rules[rule]['function'] == 'value':
                        if round and isinstance(round, int):
                            data_not_accept_df = data_analysis.round_significant_decimal_places(
                                data_not_accept_df, round, [metric]
                            )
        data_not_accept_df = data_not_accept_df.convert_dtypes().astype('object').fillna(self.na)
        lines = file_handler.generate_md_table(data_not_accept_df, header)
        return lines

    def run(
        self, raw_data_file, rule_file, baseline_file, output_dir, output_format='excel', output_all=False, round=2
    ):
        """Run the data diagnosis and output the results.

        Args:
            raw_data_file (str): the path of the raw data jsonl file
            rule_file (str): the path of the rule yaml file
            baseline_file (str): the path of the baseline json file
            output_dir (str): the directory of the output file
            output_format (str): the format of the output, 'excel', 'json', 'jsonl', 'md' or 'html'
            output_all (bool): output diagnosis results for all nodes
            round (int): the number of decimal digits
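
        Example:
            A minimal call (hypothetical paths):

                DataDiagnosis().run(
                    'results-summary.jsonl', 'rules.yaml', 'baseline.json', './outputs', output_format='json'
                )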
        """
        try:
            rules = self._preprocess(raw_data_file, rule_file)
            # read baseline
            baseline = file_handler.read_baseline(baseline_file) if baseline_file is not None else {}
            logger.info('DataDiagnosis: Begin to process {} nodes'.format(len(self._raw_data_df)))
            output_df, label_df = self.run_diagnosis_rules(rules, baseline)
            logger.info('DataDiagnosis: Processing finished')
            output_path = str(Path(output_dir) / f'diagnosis_summary.{output_format}')
            # generate all nodes' info
            if output_all:
                output_df = self.output_all_nodes_results(self._raw_data_df, output_df)
            # output according to the format
            if output_format == 'excel':
                output_path = str(Path(output_dir) / 'diagnosis_summary.xlsx')
                self.output_diagnosis_in_excel(self._raw_data_df, output_df, output_path, self._sb_rules)
            elif output_format == 'json':
                self.output_diagnosis_in_json(output_df, output_path)
            elif output_format == 'jsonl':
                self.output_diagnosis_in_jsonl(output_df, output_path)
            elif output_format == 'md' or output_format == 'html':
                lines = self.generate_md_lines(output_df, self._sb_rules, round)
                if output_format == 'md':
                    file_handler.output_lines_in_md(lines, output_path)
                else:
                    file_handler.output_lines_in_html(lines, output_path)
            else:
                logger.log_and_raise(
                    exception=Exception, msg='DataDiagnosis: output failed - unsupported output format'
                )
            logger.info('DataDiagnosis: Output results to {}'.format(output_path))
        except Exception as e:
            logger.log_and_raise(exception=Exception, msg='DataDiagnosis: run failed - {}'.format(str(e)))
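

# A minimal usage sketch (hypothetical file paths; the raw data jsonl, rule yaml
# and baseline json are assumed to follow the formats this module expects):
if __name__ == '__main__':
    diagnosis = DataDiagnosis()
    diagnosis.run(
        raw_data_file='results-summary.jsonl',
        rule_file='rules.yaml',
        baseline_file='baseline.json',
        output_dir='./outputs',
        output_format='excel',
        output_all=False,
        round=2,
    )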