import time
import torch
from contextlib import ContextDecorator
from lightx2v.utils.envs import *
from loguru import logger


class _ProfilingContext(ContextDecorator):
    """Logs wall-clock time and peak CUDA memory for a decorated function or a `with` block."""

    def __init__(self, name):
        self.name = name
        # Prefix log lines with the process rank when torch.distributed is initialized.
        self.rank_info = ""
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            rank = torch.distributed.get_rank()
            self.rank_info = f"Rank {rank} - "

    def __enter__(self):
        if torch.cuda.is_available():
            # Flush pending GPU work so the timer starts cleanly, then reset the
            # peak-memory counter for this profiling window.
            torch.cuda.synchronize()
            torch.cuda.reset_peak_memory_stats()
        self.start_time = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if torch.cuda.is_available():
            # Wait for queued GPU work to finish so the timing and peak-memory
            # readings cover everything launched inside the profiled block.
            torch.cuda.synchronize()
            peak_memory = torch.cuda.max_memory_allocated() / (1024**3)  # convert to GB
            logger.info(f"{self.rank_info}Function '{self.name}' Peak Memory: {peak_memory:.2f} GB")
        else:
            logger.info(f"{self.rank_info}Function '{self.name}' executed without GPU.")
        elapsed = time.perf_counter() - self.start_time
        logger.info(f"[Profile] {self.name} cost {elapsed:.6f} seconds")
        return False


class _NullContext(ContextDecorator):
    # No-op stand-in used when profiling is disabled, avoiding branch logic at call sites.
    def __init__(self, *args, **kwargs):
        pass

    def __enter__(self):
        return self

    def __exit__(self, *args):
        return False


# ProfilingContext always profiles; ProfilingContext4Debug profiles only when
# CHECK_ENABLE_PROFILING_DEBUG() (from lightx2v.utils.envs) is truthy, and is otherwise a no-op.
ProfilingContext = _ProfilingContext
ProfilingContext4Debug = _ProfilingContext if CHECK_ENABLE_PROFILING_DEBUG() else _NullContext
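

# A minimal usage sketch (not part of the original module): it shows the two ways the
# profiler is used, as a decorator and as a context manager. The function and tensor
# names below are illustrative only, and the demo runs only when this file is executed
# directly.
if __name__ == "__main__":

    @ProfilingContext("matmul_demo")  # logs elapsed time, plus peak GPU memory if CUDA is available
    def matmul_demo():
        a = torch.randn(512, 512)
        return a @ a

    matmul_demo()

    # ProfilingContext4Debug is a no-op here unless CHECK_ENABLE_PROFILING_DEBUG() is truthy.
    with ProfilingContext4Debug("sum_demo"):
        _ = torch.randn(512, 512).sum()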