Unverified Commit 30aee9c4 authored by binmakeswell's avatar binmakeswell Committed by GitHub
Browse files

[NFC] polish code format

[NFC] polish code format
parents 1dc003c1 93b788b9
import click
from .launcher import run
from .check import check
from .benchmark import benchmark
from .check import check
from .launcher import run
class Arguments():
......
import click
from .run import launch_multi_processes
from colossalai.context import Config
from .run import launch_multi_processes
@click.command(help="Launch distributed training on a single node or multiple nodes",
context_settings=dict(ignore_unknown_options=True))
......
from typing import Tuple
import torch
import torch.distributed as dist
......@@ -5,8 +7,6 @@ from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.singleton_meta import SingletonMeta
from colossalai.tensor import ProcessGroup
from typing import Tuple
def _check_sanity():
from colossalai.core import global_context as gpc
......
......@@ -2,10 +2,11 @@ import math
import torch.distributed as dist
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.registry import DIST_GROUP_INITIALIZER
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode
from colossalai.global_variables import tensor_parallel_env as env
from .process_group_initializer import ProcessGroupInitializer
def _check_summa_env_var(summa_dim):
......
......@@ -4,8 +4,9 @@
from torch import distributed as dist
from colossalai.registry import DIST_GROUP_INITIALIZER
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode
from .process_group_initializer import ProcessGroupInitializer
@DIST_GROUP_INITIALIZER.register_module
......
......@@ -3,9 +3,10 @@
import torch.distributed as dist
from colossalai.registry import DIST_GROUP_INITIALIZER
from ..parallel_mode import ParallelMode
from .initializer_tensor import Initializer_Tensor
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode
@DIST_GROUP_INITIALIZER.register_module
......
from typing import Iterable
import torch.distributed as dist
import torch.nn as nn
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from typing import Iterable
def bucket_allreduce(param_list: Iterable[nn.Parameter], group=None):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment