# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import collections
import functools
import json
import operator
import os
import re
import sys
import time
from typing import Dict, List, Optional, Union

import requests
from get_ci_error_statistics import get_job_links
from get_previous_daily_ci import get_last_daily_ci_reports
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])

NON_MODEL_TEST_MODULES = [
    "benchmark",
    "deepspeed",
    "extended",
    "fixtures",
    "generation",
    "onnx",
    "optimization",
    "pipelines",
    "sagemaker",
    "trainer",
    "utils",
]


def handle_test_results(test_results):
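    """
    Parse the final pytest stats line into `(n_failed, n_passed, time_spent)`.

    Illustrative example (a hypothetical stats line):
        handle_test_results("== 2 failed, 10 passed in 65.32s (0:01:05) ==")
        -> (2, 10, '(0:01:05)')
    """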
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, it is surrounded by `=` signs: "== OUTPUT ==".
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def handle_stacktraces(test_results):
    # The failure-report files (the `failures_line` artifacts) should follow this structure:
    # === FAILURES ===
    # <path>:<line>: Error ...
    # <path>:<line>: Error ...
    # <empty line>
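    # Each parsed entry then looks like (illustrative):
    #   "(line 42)  ValueError: some error message"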

    total_stacktraces = test_results.split("\n")[1:-1]
    stacktraces = []
    for stacktrace in total_stacktraces:
        try:
            line = stacktrace[: stacktrace.index(" ")].split(":")[-2]
            error_message = stacktrace[stacktrace.index(" ") :]
            stacktraces.append(f"(line {line}) {error_message}")
        except Exception:
            stacktraces.append("Cannot retrieve error message.")

    return stacktraces


def dicts_to_sum(objects: Union[Dict[str, Dict], List[dict]]):
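    """
    Sum several dicts of counts, key by key, via `collections.Counter`.

    Illustrative example:
        dicts_to_sum([{"single": 1, "multi": 2}, {"single": 3}])
        -> Counter({'single': 4, 'multi': 2})
    """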
    if isinstance(objects, dict):
        lists = objects.values()
    else:
        lists = objects

    # Convert each dictionary to counter
    counters = map(collections.Counter, lists)
    # Sum all the counters
    return functools.reduce(operator.add, counters)


class Message:
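    """Build the Slack report for a CI run and post it (with per-job replies) via the Slack `WebClient`."""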
    def __init__(
        self, title: str, ci_title: str, model_results: Dict, additional_results: Dict, selected_warnings: List = None
    ):
        self.title = title
        self.ci_title = ci_title

        # Failures and success of the modeling tests
        self.n_model_success = sum(r["success"] for r in model_results.values())
        self.n_model_single_gpu_failures = sum(dicts_to_sum(r["failed"])["single"] for r in model_results.values())
        self.n_model_multi_gpu_failures = sum(dicts_to_sum(r["failed"])["multi"] for r in model_results.values())

        # Some suites do not have a distinction between single and multi GPU.
        self.n_model_unknown_failures = sum(dicts_to_sum(r["failed"])["unclassified"] for r in model_results.values())
        self.n_model_failures = (
            self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures
        )

        # Failures and success of the additional tests
        self.n_additional_success = sum(r["success"] for r in additional_results.values())

        all_additional_failures = dicts_to_sum([r["failed"] for r in additional_results.values()])
        self.n_additional_single_gpu_failures = all_additional_failures["single"]
        self.n_additional_multi_gpu_failures = all_additional_failures["multi"]
        self.n_additional_unknown_gpu_failures = all_additional_failures["unclassified"]
        self.n_additional_failures = (
            self.n_additional_single_gpu_failures
            + self.n_additional_multi_gpu_failures
            + self.n_additional_unknown_gpu_failures
        )

        # Results
        self.n_failures = self.n_model_failures + self.n_additional_failures
        self.n_success = self.n_model_success + self.n_additional_success
        self.n_tests = self.n_failures + self.n_success

        self.model_results = model_results
        self.additional_results = additional_results

        self.thread_ts = None

        if selected_warnings is None:
            selected_warnings = []
        self.selected_warnings = selected_warnings

    @property
    def time(self) -> str:
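        """Total time spent across all reported suites, formatted like e.g. `6h14m32s`."""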
        all_results = [*self.model_results.values(), *self.additional_results.values()]
        time_spent = [r["time_spent"].split(", ")[0] for r in all_results if len(r["time_spent"])]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def ci_title_section(self) -> Dict:
        return {"type": "section", "text": {"type": "mrkdwn", "text": self.ci_title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def warnings(self) -> Dict:
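        # This reads the module-level `github_actions_job_links` mapping populated in the `__main__` block below.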
        # If something goes wrong, let's avoid the CI report failing to be sent.
        button_text = "Check warnings (Link not found)"
        # Use the workflow run link
        job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"
        if "Extract warnings in CI artifacts" in github_actions_job_links:
            button_text = "Check warnings"
            # Use the actual job link
            job_link = f"{github_actions_job_links['Extract warnings in CI artifacts']}"

        huggingface_hub_warnings = [x for x in self.selected_warnings if "huggingface_hub" in x]
        text = f"There are {len(self.selected_warnings)} warnings being selected."
        text += f"\n{len(huggingface_hub_warnings)} of them are from `huggingface_hub`."

        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": text,
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": button_text, "emoji": True},
                "url": job_link,
            },
        }

    @staticmethod
    def get_device_report(report, rjust=6):
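        """
        Format per-device failure counts as right-justified columns.

        Illustrative example:
            Message.get_device_report({"single": 1, "multi": 2})
            -> '     1 |      2 | '
        """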
        if "single" in report and "multi" in report:
            return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
        elif "single" in report:
            return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | "
        elif "multi" in report:
            return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "

    @property
    def category_failures(self) -> Dict:
        model_failures = [v["failed"] for v in self.model_results.values()]

        category_failures = {}

        for model_failure in model_failures:
            for key, value in model_failure.items():
                if key not in category_failures:
                    category_failures[key] = dict(value)
                else:
                    category_failures[key]["unclassified"] += value["unclassified"]
                    category_failures[key]["single"] += value["single"]
                    category_failures[key]["multi"] += value["multi"]

        individual_reports = []
        for key, value in category_failures.items():
            device_report = self.get_device_report(value)

            if sum(value.values()):
                if device_report:
                    individual_reports.append(f"{device_report}{key}")
                else:
                    individual_reports.append(key)

        header = "Single |  Multi | Category\n"
        category_failures_report = prepare_reports(
            title="The following modeling categories had failures", header=header, reports=individual_reports
        )

        return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}}

    def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report):  # noqa
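        """
        Keep only the rows of the model-failure table whose counts are new or have increased compared
        to the previous run, with the counts rendered as signed deltas (e.g. `+1`).

        Both inputs are expected to be non-truncated tables produced by `prepare_reports`.
        """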
        # Remove the leading and trailing parts that don't contain failure count information.
        model_failures = curr_failure_report.split("\n")[3:-2]
        prev_model_failures = prev_failure_report.split("\n")[3:-2]
        entries_changed = set(model_failures).difference(prev_model_failures)

        prev_map = {}
        for f in prev_model_failures:
            items = [x.strip() for x in f.split("| ")]
            prev_map[items[-1]] = [int(x) for x in items[:-1]]

        curr_map = {}
        for f in entries_changed:
            items = [x.strip() for x in f.split("| ")]
            curr_map[items[-1]] = [int(x) for x in items[:-1]]

        diff_map = {}
        for k, v in curr_map.items():
            if k not in prev_map:
                diff_map[k] = v
            else:
                diff = [x - y for x, y in zip(v, prev_map[k])]
                if max(diff) > 0:
                    diff_map[k] = diff

        entries_changed = []
        for model_name, diff_values in diff_map.items():
            diff = [str(x) for x in diff_values]
            diff = [f"+{x}" if (x != "0" and not x.startswith("-")) else x for x in diff]
            diff = [x.rjust(9) for x in diff]
            device_report = " | ".join(diff) + " | "
            report = f"{device_report}{model_name}"
            entries_changed.append(report)
        entries_changed = sorted(entries_changed, key=lambda s: s.split("| ")[-1])

        return entries_changed

    @property
    def model_failures(self) -> Dict:
        # Obtain per-model failures
        def per_model_sum(model_category_dict):
            return dicts_to_sum(model_category_dict["failed"].values())

        failures = {}
        non_model_failures = {
            k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())
        }

        for k, v in self.model_results.items():
            if k in NON_MODEL_TEST_MODULES:
                # Non-model modules are reported separately, in `other_module_reports` below.
                continue

            if sum(per_model_sum(v).values()):
                dict_failed = dict(v["failed"])
                pytorch_specific_failures = dict_failed.pop("PyTorch")
                tensorflow_specific_failures = dict_failed.pop("TensorFlow")
                other_failures = dicts_to_sum(dict_failed.values())

                failures[k] = {
                    "PyTorch": pytorch_specific_failures,
                    "TensorFlow": tensorflow_specific_failures,
                    "other": other_failures,
                }

        model_reports = []
        other_module_reports = []

        for key, value in non_model_failures.items():
            if key in NON_MODEL_TEST_MODULES:
                device_report = self.get_device_report(value)

                if sum(value.values()):
                    if device_report:
                        report = f"{device_report}{key}"
                    else:
                        report = key

                    other_module_reports.append(report)

        for key, value in failures.items():
            device_report_values = [
                value["PyTorch"]["single"],
                value["PyTorch"]["multi"],
                value["TensorFlow"]["single"],
                value["TensorFlow"]["multi"],
                sum(value["other"].values()),
            ]

            if sum(device_report_values):
                device_report = " | ".join([str(x).rjust(9) for x in device_report_values]) + " | "
                report = f"{device_report}{key}"

                model_reports.append(report)

        # (Possibly truncated) reports for the current workflow run - to be sent to Slack channels
        model_header = "Single PT |  Multi PT | Single TF |  Multi TF |     Other | Category\n"
        sorted_model_reports = sorted(model_reports, key=lambda s: s.split("| ")[-1])
        model_failures_report = prepare_reports(
            title="These following model modules had failures", header=model_header, reports=sorted_model_reports
        )

        module_header = "Single |  Multi | Category\n"
        sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split("| ")[-1])
        module_failures_report = prepare_reports(
            title="The following non-model modules had failures", header=module_header, reports=sorted_module_reports
        )

        # To be sent to Slack channels
        model_failure_sections = [
            {"type": "section", "text": {"type": "mrkdwn", "text": model_failures_report}},
            {"type": "section", "text": {"type": "mrkdwn", "text": module_failures_report}},
        ]

        # Save the complete (i.e. no truncation) failure tables (of the current workflow run)
        # (to be uploaded as artifacts)
        os.makedirs(os.path.join(os.getcwd(), "test_failure_tables"), exist_ok=True)

        model_failures_report = prepare_reports(
            title="These following model modules had failures",
            header=model_header,
            reports=sorted_model_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), "test_failure_tables/model_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(model_failures_report)

        module_failures_report = prepare_reports(
            title="The following non-model modules had failures",
            header=module_header,
            reports=sorted_module_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), "test_failure_tables/module_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(module_failures_report)

        target_workflow = "huggingface/transformers/.github/workflows/self-scheduled.yml@refs/heads/main"
        if os.environ.get("CI_WORKFLOW_REF") == target_workflow:
            # Get the last previously completed CI's failure tables
            artifact_names = ["test_failure_tables"]
            output_dir = os.path.join(os.getcwd(), "previous_reports")
            os.makedirs(output_dir, exist_ok=True)
            prev_tables = get_last_daily_ci_reports(
                artifact_names=artifact_names, output_dir=output_dir, token=os.environ["ACCESS_REPO_INFO_TOKEN"]
            )

            # The previous run may not have produced `test_failure_tables` (due to some issue, or because
            # it had no model failure at all).
            if len(prev_tables) > 0:
                # Compute the difference of the previous/current (model failure) table
                prev_model_failures = prev_tables["test_failure_tables"]["model_failures_report.txt"]
                entries_changed = self.compute_diff_for_failure_reports(model_failures_report, prev_model_failures)
                if len(entries_changed) > 0:
                    # Save the complete difference
                    diff_report = prepare_reports(
                        title="Changed model modules failures",
                        header=model_header,
                        reports=entries_changed,
                        to_truncate=False,
                    )
                    file_path = os.path.join(os.getcwd(), "test_failure_tables/changed_model_failures_report.txt")
                    with open(file_path, "w", encoding="UTF-8") as fp:
                        fp.write(diff_report)

                    # To be sent to Slack channels
                    diff_report = prepare_reports(
                        title="*Changed model modules failures*",
                        header=model_header,
                        reports=entries_changed,
                    )
                    model_failure_sections.append(
                        {"type": "section", "text": {"type": "mrkdwn", "text": diff_report}},
                    )

        return model_failure_sections

    @property
    def additional_failures(self) -> Dict:
        failures = {k: v["failed"] for k, v in self.additional_results.items()}
        errors = {k: v["error"] for k, v in self.additional_results.items()}

        individual_reports = []
        for key, value in failures.items():
            device_report = self.get_device_report(value)

            if sum(value.values()) or errors[key]:
                report = f"{key}"
                if errors[key]:
                    report = f"[Errored out] {report}"
                if device_report:
                    report = f"{device_report}{report}"

                individual_reports.append(report)

        header = "Single |  Multi | Category\n"
        failures_report = prepare_reports(
            title="The following non-modeling tests had failures", header=header, reports=individual_reports
        )

        return {"type": "section", "text": {"type": "mrkdwn", "text": failures_report}}

    @property
    def payload(self) -> str:
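        """Serialize the report's top-level blocks to a JSON string, as accepted by `chat_postMessage`."""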
        blocks = [self.header]

        if self.ci_title:
            blocks.append(self.ci_title_section)

        if self.n_model_failures > 0 or self.n_additional_failures > 0:
            blocks.append(self.failures)

        if self.n_model_failures > 0:
            blocks.append(self.category_failures)
            for block in self.model_failures:
                if block["text"]["text"]:
                    blocks.append(block)

        if self.n_additional_failures > 0:
            blocks.append(self.additional_failures)

        if self.n_model_failures == 0 and self.n_additional_failures == 0:
            blocks.append(self.no_failures)

        if len(self.selected_warnings) > 0:
            blocks.append(self.warnings)

        return json.dumps(blocks)

    @staticmethod
    def error_out(title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=False):
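        """
        Post a fallback Slack report when the tests could not run at all (offline runners, runner or
        setup failures).
        """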
        blocks = []
        title_block = {"type": "header", "text": {"type": "plain_text", "text": title}}
        blocks.append(title_block)

        if ci_title:
            ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}}
            blocks.append(ci_title_block)

        offline_runners = []
        if runner_not_available:
            text = "💔 CI runners are not available! Tests are not run. 😭"
            result = os.environ.get("OFFLINE_RUNNERS")
            if result is not None:
                offline_runners = json.loads(result)
        elif runner_failed:
            text = "💔 CI runners have problems! Tests are not run. 😭"
        elif setup_failed:
            text = "💔 Setup job failed. Tests are not run. 😭"
        else:
            text = "💔 There was an issue running the tests. 😭"

        error_block_1 = {
            "type": "header",
            "text": {
                "type": "plain_text",
                "text": text,
            },
        }

        text = ""
        if len(offline_runners) > 0:
            text = "\n  • " + "\n  • ".join(offline_runners)
            text = f"The following runners are offline:\n{text}\n\n"
        text += "🙏 Let's fix it ASAP! 🙏"

        error_block_2 = {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": text,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        blocks.extend([error_block_1, error_block_2])

        payload = json.dumps(blocks)

        print("Sending the following payload")
        print(json.dumps({"blocks": blocks}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
            text=text,
            blocks=payload,
        )

    def post(self):
        payload = self.payload
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
            blocks=payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_result, failures, device, text):
        """
        failures: A list with elements of the form {"line": full test name, "trace": error trace}
        """
        # `text` must be less than 3001 characters in Slack SDK
        # keep some room for adding "[Truncated]" when necessary
        MAX_ERROR_TEXT = 3000 - len("[Truncated]")

        failure_text = ""
        for idx, error in enumerate(failures):
            new_text = failure_text + f'*{error["line"]}*\n_{error["trace"]}_\n\n'
            if len(new_text) > MAX_ERROR_TEXT:
                # `failure_text` here has length <= 3000
                failure_text = failure_text + "[Truncated]"
                break
            # `failure_text` here has length <= MAX_ERROR_TEXT
            failure_text = new_text

        title = job_name
        if device is not None:
            title += f" ({device}-gpu)"

        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending)
        # Currently we get the device from a job's artifact name.
        # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`.
        # This could be done by adding `machine_type` in a job's `strategy`.
        # (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`)
        if job_result["job_link"] is not None and job_result["job_link"][device] is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_result["job_link"][device],
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failure_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                for device, failures in job_result["failures"].items():
                    text = "\n".join(
                        sorted([f"*{k}*: {v[device]}" for k, v in job_result["failed"].items() if v[device]])
                    )

                    blocks = self.get_reply_blocks(job, job_result, failures, device, text=text)

                    print("Sending the following reply")
                    print(json.dumps({"blocks": blocks}))

                    client.chat_postMessage(
                        channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
                        text=f"Results for {job}",
                        blocks=blocks,
                        thread_ts=self.thread_ts["ts"],
                    )

                    time.sleep(1)

        for job, job_result in self.additional_results.items():
            if len(job_result["failures"]):
                for device, failures in job_result["failures"].items():
                    blocks = self.get_reply_blocks(
                        job,
                        job_result,
                        failures,
                        device,
                        text=f"Number of failures: {sum(job_result['failed'].values())}",
                    )

                    print("Sending the following reply")
                    print(json.dumps({"blocks": blocks}))

                    client.chat_postMessage(
                        channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"],
                        text=f"Results for {job}",
                        blocks=blocks,
                        thread_ts=self.thread_ts["ts"],
                    )

                    time.sleep(1)


def retrieve_artifact(artifact_path: str, gpu: Optional[str]):
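    """
    Load every file of the artifact directory `artifact_path` into a dict keyed by file name stem.

    Illustrative example: a directory containing `stats.txt` and `failures_line.txt` yields
    `{"stats": "...", "failures_line": "..."}`.
    """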
    if gpu not in [None, "single", "multi"]:
        raise ValueError(f"Invalid GPU for artifact. Passed GPU: `{gpu}`.")

    _artifact = {}

    if os.path.exists(artifact_path):
        files = os.listdir(artifact_path)
        for file in files:
            try:
                with open(os.path.join(artifact_path, file)) as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
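    """
    Scan the current working directory for downloaded artifact directories and group them by artifact
    name and GPU flavor.

    Illustrative example: directories named `single-gpu_run_examples_gpu` and `multi-gpu_run_examples_gpu`
    would be grouped under one `run_examples_gpu` artifact with both `single_gpu` and `multi_gpu` set to
    `True`, and one path registered per directory.
    """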
    class Artifact:
        def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False):
            self.name = name
            self.single_gpu = single_gpu
            self.multi_gpu = multi_gpu
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str, gpu: Optional[str] = None):
            self.paths.append({"name": self.name, "path": path, "gpu": gpu})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory

        name_parts = artifact_name.split("_postfix_")
        if len(name_parts) > 1:
            artifact_name = name_parts[0]

        if artifact_name.startswith("single-gpu"):
            artifact_name = artifact_name[len("single-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].single_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="single")

        elif artifact_name.startswith("multi-gpu"):
            # Strip the prefix from the (possibly postfix-stripped) artifact name, mirroring the single-gpu branch.
            artifact_name = artifact_name[len("multi-gpu") + 1 :]

            if artifact_name in _available_artifacts:
                _available_artifacts[artifact_name].multi_gpu = True
            else:
                _available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True)

            _available_artifacts[artifact_name].add_path(directory, gpu="multi")
        else:
            if artifact_name not in _available_artifacts:
                _available_artifacts[artifact_name] = Artifact(artifact_name)

            _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


def prepare_reports(title, header, reports, to_truncate=True):
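    r"""
    Assemble a Slack-friendly report: a title followed by the header and rows wrapped in triple
    backticks, truncated if needed to fit Slack's 3001-character limit (unless `to_truncate=False`).

    Illustrative example:
        prepare_reports("Failures", "Single |  Multi | Category\n", ["     1 |      0 | bert"])
        -> 'Failures:\n```\nSingle |  Multi | Category\n     1 |      0 | bert\n```\n'
    """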
    report = ""

    MAX_ERROR_TEXT = 3000 - len("[Truncated]")
    if not to_truncate:
        MAX_ERROR_TEXT = float("inf")

    if len(reports) > 0:
        # `text` must be less than 3001 characters in Slack SDK
        # keep some room for adding "[Truncated]" when necessary

        for idx in range(len(reports)):
            _report = header + "\n".join(reports[: idx + 1])
            new_report = f"{title}:\n```\n{_report}\n```\n"
            if len(new_report) > MAX_ERROR_TEXT:
                # `report` here has length <= 3000
                report = report + "[Truncated]"
                break
            report = new_report

    return report


if __name__ == "__main__":
    runner_status = os.environ.get("RUNNER_STATUS")
    runner_env_status = os.environ.get("RUNNER_ENV_STATUS")
    setup_status = os.environ.get("SETUP_STATUS")

    runner_not_available = runner_status is not None and runner_status != "success"
    runner_failed = runner_env_status is not None and runner_env_status != "success"
    setup_failed = setup_status is not None and setup_status != "success"

    org = "huggingface"
    repo = "transformers"
    repository_full_name = f"{org}/{repo}"

    # This environment variable is set in the workflow file (under the job `send_results`).
    ci_event = os.environ["CI_EVENT"]

    # To find the PR number in a commit title, for example, `Add AwesomeFormer model (#99999)`
    pr_number_re = re.compile(r"\(#(\d+)\)$")

    title = f"🤗 Results of the {ci_event} tests."
    # Add Commit/PR title with a link for push CI
    # (check the title in 2 env. variables - depending on the CI is triggered via `push` or `workflow_run` event)
    ci_title_push = os.environ.get("CI_TITLE_PUSH")
    ci_title_workflow_run = os.environ.get("CI_TITLE_WORKFLOW_RUN")
    ci_title = ci_title_push if ci_title_push else ci_title_workflow_run

    ci_sha = os.environ.get("CI_SHA")

    ci_url = None
    if ci_sha:
        ci_url = f"https://github.com/{repository_full_name}/commit/{ci_sha}"

    if ci_title is not None:
        if ci_url is None:
            raise ValueError(
                "When a title is found (`ci_title`), it means a `push` event or a `workflow_run` even (triggered by "
                "another `push` event), and the commit SHA has to be provided in order to create the URL to the "
                "commit page."
            )
        ci_title = ci_title.strip().split("\n")[0].strip()

        # Retrieve the PR title and author login to complete the report
        commit_number = ci_url.split("/")[-1]
        ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/commits/{commit_number}"
        ci_details = requests.get(ci_detail_url).json()
        ci_author = ci_details["author"]["login"]

        merged_by = None
        # Find the PR number (if any) and change the url to the actual PR page.
        numbers = pr_number_re.findall(ci_title)
        if len(numbers) > 0:
            pr_number = numbers[0]
            ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/pulls/{pr_number}"
            ci_details = requests.get(ci_detail_url).json()

            ci_author = ci_details["user"]["login"]
            ci_url = f"https://github.com/{repository_full_name}/pull/{pr_number}"

            merged_by = ci_details["merged_by"]["login"]

        if merged_by is None:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author}"
        else:
            ci_title = f"<{ci_url}|{ci_title}>\nAuthor: {ci_author} | Merged by: {merged_by}"

    elif ci_sha:
        ci_title = f"<{ci_url}|commit: {ci_sha}>"

    else:
        ci_title = ""

    if runner_not_available or runner_failed or setup_failed:
        Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed)
        exit(0)

    arguments = sys.argv[1]
    try:
        models = ast.literal_eval(arguments)
        # Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names).
        models = [x.replace("models/", "models_") for x in models]
    except SyntaxError:
        Message.error_out(title, ci_title)
        raise ValueError("Errored out.")

    github_actions_job_links = get_job_links(
        workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"]
    )
    available_artifacts = retrieve_available_artifacts()

    modeling_categories = [
        "PyTorch",
        "TensorFlow",
        "Flax",
        "Tokenizers",
        "Pipelines",
        "Trainer",
        "ONNX",
        "Auto",
        "Unclassified",
    ]

    # This dict will contain all the information relative to each model:
    # - "failed": the number of failures, per category (defined above) and per single/multi GPU setting
    # - "success": the total number of successful tests
    # - "time_spent": a comma-separated list of elapsed times
    # - "failures": the failing test names with their error traces, per GPU setting
    model_results = {
        model: {
            "failed": {m: {"unclassified": 0, "single": 0, "multi": 0} for m in modeling_categories},
            "success": 0,
            "time_spent": "",
            "failures": {},
            "job_link": {},
        }
        for model in models
        if f"run_all_tests_gpu_{model}_test_reports" in available_artifacts
    }

    unclassified_model_failures = []

    # This prefix is used to get job links below. For past CI, we use `workflow_call`, which changes the job names from
    # `Model tests (...)` to `PyTorch 1.5 / Model tests (...)` for example.
    job_name_prefix = ""
    if ci_event.startswith("Past CI - "):
        framework, version = ci_event.replace("Past CI - ", "").split("-")
        framework = "PyTorch" if framework == "pytorch" else "TensorFlow"
        job_name_prefix = f"{framework} {version}"
    elif ci_event.startswith("Nightly CI"):
        job_name_prefix = "Nightly CI"

    for model in model_results.keys():
        for artifact_path in available_artifacts[f"run_all_tests_gpu_{model}_test_reports"].paths:
            artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"])
            if "stats" in artifact:
                # Link to the GitHub Action job
                # The job names use `matrix.folder` which contain things like `models/bert` instead of `models_bert`
                job_name = f"Model tests ({model.replace('models_', 'models/')}, {artifact_path['gpu']}-gpu)"
                if job_name_prefix:
                    job_name = f"{job_name_prefix} / {job_name}"
                model_results[model]["job_link"][artifact_path["gpu"]] = github_actions_job_links.get(job_name)
                failed, success, time_spent = handle_test_results(artifact["stats"])
                model_results[model]["success"] += success
                model_results[model]["time_spent"] += time_spent[1:-1] + ", "

                stacktraces = handle_stacktraces(artifact["failures_line"])

                for line in artifact["summary_short"].split("\n"):
                    if line.startswith("FAILED "):
                        line = line[len("FAILED ") :]
                        line = line.split()[0].replace("\n", "")

                        if artifact_path["gpu"] not in model_results[model]["failures"]:
                            model_results[model]["failures"][artifact_path["gpu"]] = []

                        model_results[model]["failures"][artifact_path["gpu"]].append(
                            {"line": line, "trace": stacktraces.pop(0)}
                        )

                        if re.search("test_modeling_tf_", line):
                            model_results[model]["failed"]["TensorFlow"][artifact_path["gpu"]] += 1

                        elif re.search("test_modeling_flax_", line):
                            model_results[model]["failed"]["Flax"][artifact_path["gpu"]] += 1

                        elif re.search("test_modeling", line):
                            model_results[model]["failed"]["PyTorch"][artifact_path["gpu"]] += 1

                        elif re.search("test_tokenization", line):
                            model_results[model]["failed"]["Tokenizers"][artifact_path["gpu"]] += 1

                        elif re.search("test_pipelines", line):
                            model_results[model]["failed"]["Pipelines"][artifact_path["gpu"]] += 1

                        elif re.search("test_trainer", line):
                            model_results[model]["failed"]["Trainer"][artifact_path["gpu"]] += 1

                        elif re.search("onnx", line):
                            model_results[model]["failed"]["ONNX"][artifact_path["gpu"]] += 1

                        elif re.search("auto", line):
                            model_results[model]["failed"]["Auto"][artifact_path["gpu"]] += 1

                        else:
                            model_results[model]["failed"]["Unclassified"][artifact_path["gpu"]] += 1
                            unclassified_model_failures.append(line)

    # Additional runs
    additional_files = {
        "Examples directory": "run_examples_gpu",
        "PyTorch pipelines": "run_tests_torch_pipeline_gpu",
        "TensorFlow pipelines": "run_tests_tf_pipeline_gpu",
        "Torch CUDA extension tests": "run_tests_torch_cuda_extensions_gpu_test_reports",
    }

    if ci_event == "push":
        del additional_files["Examples directory"]
        del additional_files["PyTorch pipelines"]
        del additional_files["TensorFlow pipelines"]

    additional_results = {
        key: {
            "failed": {"unclassified": 0, "single": 0, "multi": 0},
            "success": 0,
            "time_spent": "",
            "error": False,
            "failures": {},
            "job_link": {},
        }
        for key in additional_files.keys()
    }

    for key in additional_results.keys():
        # If a whole suite of tests fails, the artifact isn't available.
        if additional_files[key] not in available_artifacts:
            additional_results[key]["error"] = True
            continue

        for artifact_path in available_artifacts[additional_files[key]].paths:
            if artifact_path["gpu"] is not None:
                additional_results[key]["job_link"][artifact_path["gpu"]] = github_actions_job_links.get(
                    f"{key} ({artifact_path['gpu']}-gpu)"
                )
            else:
                additional_results[key]["job_link"][artifact_path["gpu"]] = github_actions_job_links.get(key)

            artifact = retrieve_artifact(artifact_path["path"], artifact_path["gpu"])
            stacktraces = handle_stacktraces(artifact["failures_line"])

            failed, success, time_spent = handle_test_results(artifact["stats"])
            additional_results[key]["failed"][artifact_path["gpu"] or "unclassified"] += failed
            additional_results[key]["success"] += success
            additional_results[key]["time_spent"] += time_spent[1:-1] + ", "

            if len(artifact["errors"]):
                additional_results[key]["error"] = True

            if failed:
                for line in artifact["summary_short"].split("\n"):
                    if line.startswith("FAILED "):
                        line = line[len("FAILED ") :]
                        line = line.split()[0].replace("\n", "")

                        if artifact_path["gpu"] not in additional_results[key]["failures"]:
                            additional_results[key]["failures"][artifact_path["gpu"]] = []

                        additional_results[key]["failures"][artifact_path["gpu"]].append(
                            {"line": line, "trace": stacktraces.pop(0)}
                        )

    selected_warnings = []
    if "warnings_in_ci" in available_artifacts:
        directory = available_artifacts["warnings_in_ci"].paths[0]["path"]
        with open(os.path.join(directory, "selected_warnings.json")) as fp:
            selected_warnings = json.load(fp)

    message = Message(title, ci_title, model_results, additional_results, selected_warnings=selected_warnings)

    # send report only if there is any failure (for push CI)
    if message.n_failures or ci_event != "push":
        message.post()
        message.post_reply()