# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""A module for baseline-based data diagnosis."""
from typing import Callable
from pathlib import Path
import json

import pandas as pd
import numpy as np

from superbench.common.utils import logger
from superbench.analyzer.diagnosis_rule_op import RuleOp, DiagnosisRuleType
from superbench.analyzer import file_handler
from superbench.analyzer import RuleBase
from superbench.analyzer import data_analysis


class DataDiagnosis(RuleBase):
    """The DataDiagnosis class to do the baseline-based data diagnosis."""
    def __init__(self):
        """Init function."""
        super().__init__()

    def _check_and_format_rules(self, rule, name):
        """Check the rule of the metric whether the formart is valid.

        Args:
            rule (dict): the rule
            name (str): the rule name

        Returns:
            dict: the rule for the metric
        """
        # check if rule is supported
        super()._check_and_format_rules(rule, name)
        if 'store' not in rule:
            if 'function' not in rule:
                logger.log_and_raise(exception=Exception, msg='{} lack of function'.format(name))
            if not isinstance(DiagnosisRuleType(rule['function']), DiagnosisRuleType):
                logger.log_and_raise(exception=Exception, msg='{} invalid function name'.format(name))
            # check rule format
            if 'criteria' not in rule:
                logger.log_and_raise(exception=Exception, msg='{} lack of criteria'.format(name))
            if not isinstance(eval(rule['criteria']), Callable):
                logger.log_and_raise(exception=Exception, msg='invalid criteria format')
            if rule['function'] != 'multi_rules':
                if 'metrics' not in rule:
                    logger.log_and_raise(exception=Exception, msg='{} lack of metrics'.format(name))
        if 'store' in rule and not isinstance(rule['store'], bool):
            logger.log_and_raise(exception=Exception, msg='{} store must be bool type'.format(name))
        return rule
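
    # Illustrative only (not taken from the SuperBench docs): a minimal rule dict
    # that would pass the checks above, assuming 'variance' is a registered
    # DiagnosisRuleType and the metric regex matches columns in the raw data:
    #
    #   {
    #       'function': 'variance',
    #       'criteria': 'lambda x: x < -0.05',
    #       'categories': 'KernelLaunch',
    #       'metrics': ['kernel-launch/event_overhead'],
    #   }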

    def _get_baseline_of_metric(self, baseline, metric):
        """Get the baseline value of the metric.

        Args:
            baseline (dict): baseline defined in baseline file
            metric (str): the full name of the metric

        Returns:
            numeric: the baseline value of the metric
        """
        if metric in baseline:
            return baseline[metric]
        elif 'return_code' in metric:
            return 0
        else:
            short = metric
            # exclude rank info, for example, '.*:\d+'->'.*'
            if ':' in metric:
                short = metric.strip(metric.split(':')[-1]).strip(':')
            else:
                short = metric.split('/')[0]
            if short in baseline:
                return baseline[short]
            # baseline not defined
            else:
                logger.warning('DataDiagnosis: get baseline - {} baseline not found'.format(metric))
                return -1
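
    # Hypothetical lookup: with baseline = {'kernel-launch/event_overhead': 0.0055},
    # _get_baseline_of_metric(baseline, 'kernel-launch/event_overhead:0') returns
    # 0.0055, since the rank suffix ':0' is stripped before the fallback lookup.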

    def __get_metrics_and_baseline(self, rule, benchmark_rules, baseline):
        """Get metrics with baseline in the rule.

        Parse the metric regex in the rule, store each (metric, baseline) pair
        in _sb_rules[rule]['metrics'], and add the metric to _enable_metrics.

        Args:
            rule (str): the name of the rule
            benchmark_rules (dict): the dict of rules
            baseline (dict): the dict of baseline of metrics
        """
        if 'function' in self._sb_rules[rule] and self._sb_rules[rule]['function'] == 'multi_rules':
            return
        self._get_metrics(rule, benchmark_rules)
        for metric in self._sb_rules[rule]['metrics']:
            self._sb_rules[rule]['metrics'][metric] = self._get_baseline_of_metric(baseline, metric)

    def _parse_rules_and_baseline(self, rules, baseline):
        """Parse and merge rules and baseline read from file.

        Args:
            rules (dict): rules from rule yaml file
            baseline (dict): baseline of metrics from baseline json file

        Returns:
            bool: return True if successfully get the criteria for all rules, otherwise False.
        """
        try:
            if not rules:
                logger.log_and_raise(exception=Exception, msg='DataDiagnosis: get criteria failed')
            self._sb_rules = {}
            self._enable_metrics = set()
            benchmark_rules = rules['superbench']['rules']
            self._raw_rules = benchmark_rules
            for rule in benchmark_rules:
                benchmark_rules[rule] = self._check_and_format_rules(benchmark_rules[rule], rule)
                self._sb_rules[rule] = {}
                self._sb_rules[rule]['name'] = rule
                if 'function' in benchmark_rules[rule]:
                    self._sb_rules[rule]['function'] = benchmark_rules[rule]['function']
                # store defaults to False; _check_and_format_rules guarantees it is a bool if present
                self._sb_rules[rule]['store'] = benchmark_rules[rule].get('store', False)
                if 'criteria' in benchmark_rules[rule]:
                    self._sb_rules[rule]['criteria'] = benchmark_rules[rule]['criteria']
                self._sb_rules[rule]['categories'] = benchmark_rules[rule]['categories']
                self._sb_rules[rule]['metrics'] = {}
                self.__get_metrics_and_baseline(rule, benchmark_rules, baseline)
            self._enable_metrics = sorted(self._enable_metrics)
        except Exception as e:
            logger.log_and_raise(exception=Exception, msg='DataDiagnosis: get criteria failed - {}'.format(str(e)))

        return True
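
    # Sketch of the expected input shapes (hypothetical names, not a full schema):
    #   rules = {'superbench': {'rules': {'rule0': {'function': 'variance', ...}}}}
    #   baseline = {'kernel-launch/event_overhead': 0.0055}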

    def _run_diagnosis_rules_for_single_node(self, node):
        """Use rules to diagnosis single node data.

        Use the rules defined in rule_file to diagnose the raw data of each node;
        if the node violates any rule, label it as a defective node and save
        the 'Category', 'Defective Details' and data summary of the defective node.

        Args:
            node (str): the node to diagnose

        Returns:
            details_row (list): None if the node is not labeled as defective,
                otherwise details of ['Category', 'Defective Details']
            summary_data_row (dict): None if the node is not labeled as defective,
                otherwise data summary of the metrics
        """
        data_row = self._raw_data_df.loc[node]
        issue_label = False
        details = []
        categories = set()
        store_values = {}
        summary_data_row = pd.Series(index=self._enable_metrics, name=node, dtype=float)
        # Check each rule
        for rule in self._sb_rules:
            # if a rule has store=True and no criteria, just record the metric values for later rules
            if self._sb_rules[rule]['store'] and 'criteria' not in self._sb_rules[rule]:
                store_values[rule] = {}
                for metric in self._sb_rules[rule]['metrics']:
                    store_values[rule][metric] = data_row[metric]
                continue
            # Get rule op function and run the rule
            function_name = self._sb_rules[rule]['function']
            rule_op = RuleOp.get_rule_func(DiagnosisRuleType(function_name))
            violated_num = 0
            if rule_op == RuleOp.multi_rules:
                violated_num = rule_op(self._sb_rules[rule], details, categories, store_values)
            elif rule_op == RuleOp.failure_check:
                violated_num = rule_op(
                    data_row, self._sb_rules[rule], summary_data_row, details, categories, self._raw_rules[rule]
                )
            else:
                violated_num = rule_op(data_row, self._sb_rules[rule], summary_data_row, details, categories)
            # label the node as defective if any rule is violated
            if self._sb_rules[rule]['store']:
                store_values[rule] = violated_num
            elif violated_num:
                issue_label = True
        if issue_label:
            # Add category information
            general_cat_str = ','.join(sorted(categories))
            details_cat_str = ','.join(sorted(details))
            details_row = [general_cat_str, details_cat_str]
            return details_row, summary_data_row

        return None, None
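
    # Hypothetical result for a defective node (the exact detail strings are
    # produced by the RuleOp functions):
    #   details_row = ['KernelLaunch', 'kernel-launch/event_overhead:0(VAR: 34.5% Rule:lambda x:x>0.05)']
    #   summary_data_row = a pd.Series of metric values indexed by _enable_metrics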

    def run_diagnosis_rules(self, rules, baseline):
        """Rule-based data diagnosis for multiple nodes' raw data.

        Use the rules defined in rules to diagnose the raw data of each node;
        if a node violates any rule, label it as a defective node and save
        the 'Category', 'Defective Details' and processed data of the defective node.

        Args:
            rules (dict): rules from rule yaml file
            baseline (dict): baseline of metrics from baseline json file

        Returns:
            data_not_accept_df (DataFrame): defective nodes' detailed information
            label_df (DataFrame): labels for all nodes
        """
        summary_columns = ['Category', 'Defective Details']
        data_not_accept_df = pd.DataFrame(columns=summary_columns)
        summary_details_df = pd.DataFrame()
        label_df = pd.DataFrame(columns=['label'])
        if not self._parse_rules_and_baseline(rules, baseline):
            return data_not_accept_df, label_df
        # run diagnosis rules for each node
        for node in self._raw_data_df.index:
            details_row, summary_data_row = self._run_diagnosis_rules_for_single_node(node)
            if details_row:
                data_not_accept_df.loc[node] = details_row
                summary_details_df = pd.concat(
                    [summary_details_df,
                     pd.DataFrame([summary_data_row.to_dict()], index=[summary_data_row.name])]
                )
                label_df.loc[node] = 1
            else:
                label_df.loc[node] = 0
        # combine details for defective nodes
        if len(data_not_accept_df) != 0:
            data_not_accept_df = data_not_accept_df.join(summary_details_df)
            data_not_accept_df = data_not_accept_df.sort_values(by=summary_columns, ascending=False)

        return data_not_accept_df, label_df
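
    # Minimal usage sketch (assumes self._raw_data_df was already loaded, e.g.
    # by _preprocess; file paths are hypothetical):
    #   rules = file_handler.read_rules('rule.yaml')
    #   baseline = file_handler.read_baseline('baseline.json')
    #   data_not_accept_df, label_df = self.run_diagnosis_rules(rules, baseline)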

    def output_all_nodes_results(self, raw_data_df, data_not_accept_df):
        """Output diagnosis results of all nodes.

        Args:
            raw_data_df (DataFrame): raw data
            data_not_accept_df (DataFrame): defective nodes' detailed information

        Returns:
            DataFrame: all nodes' detailed information including ['Accept', 'Number Of Issues',
            'Category', 'Defective Details']
        """
        append_columns = ['Accept', 'Number Of Issues', 'Category', 'Defective Details']
        all_data_df = raw_data_df.astype('float64')

        if data_not_accept_df.shape[0] == 0:
            all_data_df['Accept'] = True
            all_data_df['Number Of Issues'] = 0
            all_data_df['Category'] = None
            all_data_df['Defective Details'] = None

        else:
            data_not_accept_df['Accept'] = False
            data_not_accept_df['Number Of Issues'] = data_not_accept_df['Defective Details'].map(
                lambda x: len(x.split(','))
            )
            for column in append_columns:
                if column not in data_not_accept_df:
                    logger.log_and_raise(
                        Exception,
                        msg='DataDiagnosis: output_all_nodes_results - column {} not found in data_not_accept_df.'.
                        format(column)
                    )
                else:
                    all_data_df = all_data_df.merge(
                        data_not_accept_df[[column]], left_index=True, right_index=True, how='left'
                    )
            all_data_df['Accept'] = all_data_df['Accept'].replace(np.nan, True)
            all_data_df['Number Of Issues'] = all_data_df['Number Of Issues'].replace(np.nan, 0)
            all_data_df['Number Of Issues'] = all_data_df['Number Of Issues'].astype(int)

        all_data_df = all_data_df.replace(np.nan, '')

        return all_data_df
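
    # Illustrative output (hypothetical values): healthy nodes get Accept=True,
    # Number Of Issues=0 and empty Category/Defective Details; defective nodes
    # keep the strings produced by run_diagnosis_rules.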

    def output_diagnosis_in_excel(self, raw_data_df, data_not_accept_df, output_path, rules):
        """Output the raw_data_df and data_not_accept_df results into excel file.

        Args:
            raw_data_df (DataFrame): raw data
            data_not_accept_df (DataFrame): defective nodes' detailed information
            output_path (str): the path of output excel file
            rules (dict): the rules of DataDiagnosis
        """
        try:
            writer = pd.ExcelWriter(output_path, engine='xlsxwriter')
            # Check whether writer is valid
            if not isinstance(writer, pd.ExcelWriter):
                logger.log_and_raise(exception=IOError, msg='DataDiagnosis: excel_data_output - invalid file path.')
            file_handler.output_excel_raw_data(writer, raw_data_df, 'Raw Data')
            file_handler.output_excel_data_not_accept(writer, data_not_accept_df, rules)
            writer.save()
        except Exception as e:
            logger.log_and_raise(exception=Exception, msg='DataDiagnosis: excel_data_output - {}'.format(str(e)))

    def output_diagnosis_in_jsonl(self, data_not_accept_df, output_path):
        """Output data_not_accept_df into jsonl file.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            output_path (str): the path of output jsonl file
        """
        p = Path(output_path)
        try:
            # validate the input before converting it
            if not isinstance(data_not_accept_df, pd.DataFrame):
                logger.log_and_raise(
                    Exception, msg='DataDiagnosis: output json data - data_not_accept_df is not DataFrame.'
                )
            data_not_accept_json = data_not_accept_df.to_json(orient='index')
            data_not_accept = json.loads(data_not_accept_json)
            if data_not_accept_df.empty:
                # write an empty file for an empty DataFrame
                with p.open('w'):
                    pass
                return
            with p.open('w') as f:
                for node in data_not_accept:
                    line = data_not_accept[node]
                    line['Index'] = node
                    json_str = json.dumps(line)
                    f.write(json_str + '\n')
        except Exception as e:
            logger.log_and_raise(
                exception=Exception, msg='DataDiagnosis: output json data failed, msg: {}'.format(str(e))
            )
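
    # Illustrative jsonl output, one record per defective node (hypothetical values):
    #   {"Category": "KernelLaunch", "Defective Details": "...", "Index": "node1"}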

    def output_diagnosis_in_json(self, data_not_accept_df, output_path):
        """Output data_not_accept_df into json file.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            output_path (str): the path of output json file
        """
        data_not_accept_df['Index'] = data_not_accept_df.index
        data_not_accept_df = data_not_accept_df.rename(
            columns={
                'Defective Details': 'diagnosis/issue_details',
                'Category': 'diagnosis/category',
                'Number Of Issues': 'diagnosis/issue_num',
                'Accept': 'diagnosis/accept'
            }
        )
        data_not_accept_json = data_not_accept_df.to_json(orient='records')
        data_not_accept = json.loads(data_not_accept_json)
        p = Path(output_path)
        with p.open('w') as f:
            json.dump(data_not_accept, f, indent=4)

    def generate_md_lines(self, data_not_accept_df, rules, round):
        """Convert DataFrame into markdown lines.

        Args:
            data_not_accept_df (DataFrame): the DataFrame to output
            rules (dict): the rules of DataDiagnosis
            round (int): the number of decimal digits

        Returns:
            list: lines in markdown format
        """
        if len(data_not_accept_df) == 0:
            return []
        data_not_accept_df['machine'] = data_not_accept_df.index
        header = data_not_accept_df.columns.tolist()
        header = header[-1:] + header[:-1]
        data_not_accept_df = data_not_accept_df[header]
        # format precision of values to n decimal digits
        for rule in rules:
            for metric in rules[rule]['metrics']:
                if rules[rule]['function'] == 'variance':
                    if round and isinstance(round, int):
                        data_not_accept_df[metric] = data_not_accept_df[metric].map(
                            lambda x: x * 100, na_action='ignore'
                        )
                        data_not_accept_df = data_analysis.round_significant_decimal_places(
                            data_not_accept_df, round, [metric]
                        )
                    data_not_accept_df[metric] = data_not_accept_df[metric].map(
                        lambda x: '{}%'.format(x), na_action='ignore'
                    )
                elif rules[rule]['function'] == 'value':
                    if round and isinstance(round, int):
                        data_not_accept_df = data_analysis.round_significant_decimal_places(
                            data_not_accept_df, round, [metric]
                        )
        lines = file_handler.generate_md_table(data_not_accept_df, header)
        return lines
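
    # The returned lines form a markdown table, e.g. (hypothetical columns/values):
    #   | machine | Category | kernel-launch/event_overhead:0 |
    #   | node1 | KernelLaunch | 34.5% |
    # variance metrics are rendered as percentages, value metrics as rounded numbers.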

    def run(
        self, raw_data_file, rule_file, baseline_file, output_dir, output_format='excel', output_all=False, round=2
    ):
        """Run the data diagnosis and output the results.

        Args:
            raw_data_file (str): the path of raw data jsonl file.
            rule_file (str): the path of rule yaml file
            baseline_file (str): the path of baseline json file
            output_dir (str): the directory of output file
            output_format (str): the format of the output, one of 'excel', 'json', 'jsonl', 'md' or 'html'
            output_all (bool): output diagnosis results for all nodes
            round (int): the number of decimal digits
        """
        try:
            rules = self._preprocess(raw_data_file, rule_file)
            # read baseline
            baseline = file_handler.read_baseline(baseline_file)
            logger.info('DataDiagnosis: Begin to process {} nodes'.format(len(self._raw_data_df)))
            output_df, label_df = self.run_diagnosis_rules(rules, baseline)
            logger.info('DataDiagnosis: Processing finished')
            output_path = str(Path(output_dir) / f'diagnosis_summary.{output_format}')
            # generate all nodes' info
            if output_all:
                output_df = self.output_all_nodes_results(self._raw_data_df, output_df)
            # output in the specified format
            if output_format == 'excel':
                output_path = str(Path(output_dir) / 'diagnosis_summary.xlsx')
                self.output_diagnosis_in_excel(self._raw_data_df, output_df, output_path, self._sb_rules)
            elif output_format == 'json':
                self.output_diagnosis_in_json(output_df, output_path)
            elif output_format == 'jsonl':
                self.output_diagnosis_in_jsonl(output_df, output_path)
            elif output_format in ('md', 'html'):
                lines = self.generate_md_lines(output_df, self._sb_rules, round)
                if output_format == 'md':
                    file_handler.output_lines_in_md(lines, output_path)
                else:
                    file_handler.output_lines_in_html(lines, output_path)
            else:
                logger.log_and_raise(
                    exception=Exception, msg='DataDiagnosis: output failed - unsupported output format'
                )
            logger.info('DataDiagnosis: Output results to {}'.format(output_path))
        except Exception as e:
            logger.log_and_raise(exception=Exception, msg='DataDiagnosis: run failed - {}'.format(str(e)))
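

# Minimal end-to-end usage sketch. The paths below are placeholders and must
# point to real raw-data, rule and baseline files for this to run.
if __name__ == '__main__':
    diagnosis = DataDiagnosis()
    diagnosis.run(
        raw_data_file='outputs/results-summary.jsonl',    # hypothetical path
        rule_file='rule.yaml',                            # hypothetical path
        baseline_file='baseline.json',                    # hypothetical path
        output_dir='outputs',
        output_format='excel',
    )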