Unverified Commit a0064407 authored by Jiarui Fang, committed by GitHub

reorganize colotensor directory (#1062)

* reorganize colotensor directory

* polish code
parent 3d10be33
@@ -4,3 +4,7 @@ from .lr_scheduler import *
from .metric import *
from .model import *
from .optimizer import *
from ._ops import *
from .modules import ColoLinear, ColoEmbedding
from .module_utils import register_colo_module, is_colo_module, get_colo_module, init_colo_module, check_colo_module
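These re-exports make colossalai.nn the public entry point for the ColoModule machinery moved in this commit. A minimal usage sketch, assuming an initialized 1D tensor-parallel context, that ColoLinear has been registered for torch.nn.Linear via register_colo_module, and that ParallelAction takes a compute_pattern argument (none of which is shown in this diff):

import torch
from colossalai.nn import init_colo_module, is_colo_module
from colossalai.tensor import ComputePattern, ParallelAction

# init_colo_module walks the module, finds the registered descriptor for its
# type, and shards the parameters according to the chosen mode.
model = torch.nn.Linear(16, 16)
action = ParallelAction(compute_pattern=ComputePattern.TP1D)    # assumed signature
init_colo_module(model, action, recursive=True, mode='row')
assert is_colo_module(model)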
@@ -10,6 +10,7 @@ def register_colo_module(module_type: type, colo_module: ColoModule):
global _COLOSSAL_MODULES
_COLOSSAL_MODULES[module_type] = colo_module
def is_colo_module(module: torch.nn.Module):
global _COLOSSAL_MODULES
for module_type in _COLOSSAL_MODULES.keys():
@@ -17,6 +18,7 @@ def is_colo_module(module: torch.nn.Module):
return True
return False
def get_colo_module(module: torch.nn.Module):
global _COLOSSAL_MODULES
if is_colo_module(module):
@@ -26,6 +28,7 @@ def get_colo_module(module: torch.nn.Module):
else:
return None
def check_colo_module(module: torch.nn.Module, recursive=True):
if is_colo_module(module):
colo_module = get_colo_module(module)
@@ -35,20 +38,22 @@ def check_colo_module(module: torch.nn.Module, recursive=True):
param = module.get_parameter(param_name)
if not isinstance(param, ColoParameter):
raise Exception(f'Invalid ColoParameter spec: {param} in {module} is not a ColoParameter.')
if param.has_spec():
cur_compute_pattern = param.spec.parallel_action.compute_pattern
if compute_pattern is None:
compute_pattern = cur_compute_pattern
else:
if cur_compute_pattern != compute_pattern:
raise Exception(
    f'Invalid ColoParameter spec: Params in {module} have different compute_pattern.')
else:
continue
if compute_pattern is not None:
colo_module.register(compute_pattern)
if not colo_module.has_compute_pattern(compute_pattern):
raise Exception(
    f'Invalid ColoParameter spec: ComputePattern {compute_pattern} in {module} is not allowed.')
match_specs = False
allowed_specs = colo_module.get_dist_specs(compute_pattern)
@@ -73,6 +78,7 @@ def check_colo_module(module: torch.nn.Module, recursive=True):
for submodule in module.children():
check_colo_module(submodule, recursive=True)
def init_colo_module(module: torch.nn.Module, parallel_action: ParallelAction, recursive=True, mode='default'):
compute_pattern = parallel_action.compute_pattern
if is_colo_module(module):
@@ -99,4 +105,3 @@ def init_colo_module(module: torch.nn.Module, parallel_action: ParallelAction, r
if recursive == True:
for submodule in module.children():
init_colo_module(submodule, parallel_action, recursive=True, mode=mode)
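Taken together, the helpers in this file implement a simple type-keyed registry. A condensed, self-contained sketch of that pattern; the isinstance-based lookup is inferred from the elided loop bodies above, so treat it as an assumption:

import torch

_COLOSSAL_MODULES: dict = {}    # maps a torch module type to its ColoModule descriptor

def register_colo_module(module_type: type, colo_module) -> None:
    _COLOSSAL_MODULES[module_type] = colo_module

def get_colo_module(module: torch.nn.Module):
    # the first registered type that matches wins; isinstance lets subclasses resolve too
    for module_type, colo_module in _COLOSSAL_MODULES.items():
        if isinstance(module, module_type):
            return colo_module
    return None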
@@ -4,11 +4,12 @@ from typing import List, Dict
class ColoModule(object):
def __init__(self):
self._shard_params: List[str] = []
# Example:
# {ComputePattern.TP1D:
#    'default':
# 'weight':
# distspec.shard(xxxxx)
# 'bias':
@@ -21,25 +22,29 @@ class ColoModule(object):
def _register_shard_params(self, params: List[str]):
self._shard_params = params
def _register_allowed_patterns(self,
                               compute_pattern: ComputePattern,
                               dist_specs: Dict[str, _DistSpec],
                               mode='default'):
    # every registered shard param must come with a dist_spec
    assert sorted(dist_specs.keys()) == sorted(self._shard_params), 'Every registered param should have a dist_spec.'
if not compute_pattern in self._allowed_patterns:
self._allowed_patterns[compute_pattern] = {}
self._allowed_patterns[compute_pattern][mode] = dist_specs
def _set_default(self, compute_pattern: ComputePattern, target_mode):
self._allowed_patterns[compute_pattern]['default'] = self._allowed_patterns[compute_pattern][target_mode]
def has_compute_pattern(self, compute_pattern: ComputePattern):
return compute_pattern in self._allowed_patterns
def get_dist_specs(self, compute_pattern: ComputePattern):
assert self.has_compute_pattern(compute_pattern)
return self._allowed_patterns[compute_pattern]
def has_compute_pattern_with_mode(self, compute_pattern: ComputePattern, mode='default'):
return compute_pattern in self._allowed_patterns and mode in self._allowed_patterns[compute_pattern]
def get_dist_specs_with_mode(self, compute_pattern: ComputePattern, mode='default'):
assert self.has_compute_pattern_with_mode(compute_pattern, mode)
return self._allowed_patterns[compute_pattern][mode]
@@ -48,4 +53,4 @@ class ColoModule(object):
return self._shard_params
def register(self, compute_pattern):
raise NotImplementedError
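The accessors above expose a two-level table: compute pattern -> mode -> per-parameter dist specs. A short sketch of querying a registered descriptor, assuming an initialized 1D parallel context (registration resolves process groups via gpc) and that ColoLinear installs a 'default' mode via _set_default, which is elided from this diff:

from colossalai.nn import ColoLinear
from colossalai.tensor import ComputePattern

colo_linear = ColoLinear()
colo_linear.register(ComputePattern.TP1D)    # populates the 'row' and 'col' modes
assert colo_linear.has_compute_pattern(ComputePattern.TP1D)
assert colo_linear.has_compute_pattern_with_mode(ComputePattern.TP1D, mode='row')
specs = colo_linear.get_dist_specs_with_mode(ComputePattern.TP1D, mode='default')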
@@ -3,23 +3,27 @@ from colossalai.tensor import ComputePattern, distspec
from colossalai.core import global_context as gpc
from colossalai.context.parallel_mode import ParallelMode
class ColoEmbedding(ColoModule):
def __init__(self):
super(ColoEmbedding, self).__init__()
self._register_shard_params(['weight'])
def register(self, compute_pattern):
if not compute_pattern in self._allowed_patterns:
if ComputePattern.TP1D == compute_pattern:
self._set_TP1D()
def _set_TP1D(self):
# TP1D Row Embedding
_compute_pattern = ComputePattern.TP1D
self._register_allowed_patterns(
compute_pattern=_compute_pattern,
dist_specs={
'weight':
    distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0],
                   [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
},
mode='row',
)
@@ -28,9 +32,11 @@ class ColoEmbedding(ColoModule):
self._register_allowed_patterns(
compute_pattern=_compute_pattern,
dist_specs={
'weight':
    distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1],
                   [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
},
mode='col',
)
self._set_default(compute_pattern=_compute_pattern, target_mode='row')
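For an embedding weight of shape (num_embeddings, embedding_dim), mode='row' therefore shards dim 0 (the vocabulary) and mode='col' shards dim -1 (the hidden size), with 'row' installed as the default. A toy single-process illustration of the two layouts in plain torch (a world size of 2 is assumed; distspec itself is not invoked):

import torch

weight = torch.randn(10, 4)                     # (num_embeddings, embedding_dim)
row_shards = torch.chunk(weight, 2, dim=0)      # mode='row': each rank holds 5 vocabulary rows
col_shards = torch.chunk(weight, 2, dim=-1)     # mode='col': each rank holds 2 hidden columns
assert row_shards[0].shape == (5, 4)
assert col_shards[0].shape == (10, 2)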
@@ -3,24 +3,29 @@ from colossalai.tensor import ComputePattern, distspec
from colossalai.core import global_context as gpc
from colossalai.context.parallel_mode import ParallelMode
class ColoLinear(ColoModule):
def __init__(self):
super(ColoLinear, self).__init__()
self._register_shard_params(['weight', 'bias'])
def register(self, compute_pattern):
if not compute_pattern in self._allowed_patterns:
if ComputePattern.TP1D == compute_pattern:
self._set_TP1D()
def _set_TP1D(self):
# TP1D Row Linear
_compute_pattern = ComputePattern.TP1D
self._register_allowed_patterns(
compute_pattern=_compute_pattern,
dist_specs={
'weight':
    distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [-1],
                   [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
'bias':
    None
},
mode='row',
)
@@ -29,8 +34,12 @@ class ColoLinear(ColoModule):
self._register_allowed_patterns(
compute_pattern=_compute_pattern,
dist_specs={
'weight':
    distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0],
                   [gpc.get_world_size(ParallelMode.PARALLEL_1D)]),
'bias':
    distspec.shard(gpc.get_group(ParallelMode.PARALLEL_1D), [0],
                   [gpc.get_world_size(ParallelMode.PARALLEL_1D)])
},
mode='col',
)
......
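For torch.nn.Linear (weight of shape (out_features, in_features)), mode='row' splits the input dimension and leaves the bias whole, while mode='col' splits the output dimension of both weight and bias. The arithmetic behind the row layout can be verified in plain torch; in a real run the sum below would be an all-reduce across ranks (a world size of 2 is assumed):

import torch

w, b = torch.randn(8, 4), torch.randn(8)    # (out_features, in_features), (out_features,)
x = torch.randn(3, 4)
y_full = x @ w.t() + b

# mode='row': weight and activations are split along the input dim; each rank
# computes a partial matmul and the partial results are summed
w_shards = torch.chunk(w, 2, dim=-1)
x_shards = torch.chunk(x, 2, dim=-1)
y_row = sum(xs @ ws.t() for xs, ws in zip(x_shards, w_shards)) + b
torch.testing.assert_close(y_full, y_row)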
@@ -7,7 +7,9 @@ from .lamb import Lamb
from .lars import Lars
from .cpu_adam import CPUAdam
from .hybrid_adam import HybridAdam
from .colo_optimizer import ColoOptimizer
__all__ = [
'ColossalaiOptimizer', 'FusedLAMB', 'FusedAdam', 'FusedSGD', 'Lamb', 'Lars', 'CPUAdam', 'HybridAdam',
'CPU_ADAM_CNT', 'ColoOptimizer'
]
from .spec import ComputePattern, ParallelAction, TensorSpec
from .op_wrapper import (
colo_op_impl,)
from .colo_tensor import ColoTensor
from .colo_parameter import ColoParameter
from .utils import convert_parameter, named_params_with_colotensor
from ._ops import *
from .optim.colo_optimizer import ColoOptimizer
from . import distspec
from .dist_spec_mgr import DistSpecManager
from .param_op_hook import ParamOpHook, use_param_op_hooks
from .chunk import ChunkManager, TensorState
from .module_utils import register_colo_module, is_colo_module, get_colo_module, init_colo_module, check_colo_module
from .modules import ColoLinear, ColoEmbedding
__all__ = [
'ColoTensor', 'convert_parameter', 'ComputePattern', 'TensorSpec', 'ParallelAction', 'named_params_with_colotensor',
'ColoParameter', 'distspec', 'DistSpecManager', 'ParamOpHook', 'use_param_op_hooks', 'ChunkManager', 'TensorState'
]
from colossalai.tensor.colo_tensor import ColoTensor
from colossalai.tensor.const import TensorType
import torch
from colossalai.tensor import TensorSpec, distspec
from copy import copy
from colossalai.tensor.param_op_hook import _ParamOpHookWrapper, PreFwdPostBwd, PostFwdPreBwd
from typing import Optional
......
from colossalai.tensor.distspec import _DistSpec
# from colossalai.nn.layer.utils import divide
from numpy import prod
from contextlib import contextmanager
import torch
import torch.distributed as dist
# TODO(jiaruifang) circular import; move divide to colossalai.commons.
# colossalai.tensor shall not import any submodule from colossalai.nn
def divide(numerator, denominator):
"""Only allow exact division.
Args:
numerator (int): Numerator of the division.
denominator (int): Denominator of the division.
Returns:
int: the result of exact division.
"""
assert denominator != 0, 'denominator can not be zero'
assert numerator % denominator == 0, \
'{} is not divisible by {}'.format(numerator, denominator)
return numerator // denominator
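A quick check of the contract with illustrative values:

assert divide(12, 4) == 3
# divide(10, 3) raises AssertionError: '10 is not divisible by 3'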
class TransformDistSpec(torch.autograd.Function):
@staticmethod
......
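TransformDistSpec's body is elided above; judging by its name and placement, it is a torch.autograd.Function that converts a tensor to a new distribution spec in forward and routes the gradient back to the original spec in backward. A generic sketch of that pattern with placeholder handler names (not this file's actual code):

import torch

class _TransformSketch(torch.autograd.Function):

    @staticmethod
    def forward(ctx, tensor, old_spec, new_spec, to_new, to_old):
        # remember how to undo the layout change for the backward pass
        ctx.old_spec, ctx.new_spec, ctx.to_old = old_spec, new_spec, to_old
        return to_new(tensor, old_spec, new_spec)

    @staticmethod
    def backward(ctx, grad_output):
        # gradient returns in the original layout; None for the non-tensor arguments
        return ctx.to_old(grad_output, ctx.new_spec, ctx.old_spec), None, None, None, None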