OpenDAS / ColossalAI / Commits / 46c009db

Commit 46c009db (unverified)
[format] Run lint on colossalai.engine (#3367)
Authored Apr 06, 2023 by Hakjin Lee; committed via GitHub Apr 05, 2023
parent b9231390

Showing 9 changed files with 32 additions and 20 deletions
Files changed:

  colossalai/engine/gradient_accumulation/__init__.py                        +11  -4
  colossalai/engine/gradient_handler/_base_gradient_handler.py                +1  -1
  colossalai/engine/gradient_handler/_data_parallel_gradient_handler.py       +4  -3
  colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py   +4  -3
  colossalai/engine/gradient_handler/_sequence_parallel_gradient_handler.py   +4  -3
  colossalai/engine/gradient_handler/_zero_gradient_handler.py                +1  -0
  colossalai/engine/schedule/__init__.py                                      +1  -1
  colossalai/engine/schedule/_base_schedule.py                                +1  -1
  colossalai/engine/schedule/_non_pipeline_schedule.py                        +5  -4
colossalai/engine/gradient_accumulation/__init__.py

+from typing import Iterable, List
+
 import torch.nn as nn
-from typing import List
-from colossalai.engine import BaseGradientHandler
-from typing import Iterable
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import _LRScheduler
-from ._gradient_accumulation import GradAccumDataloader, GradAccumOptimizer, GradAccumLrSchedulerByStep, GradAccumGradientHandler
+
+from colossalai.engine import BaseGradientHandler
+
+from ._gradient_accumulation import (
+    GradAccumDataloader,
+    GradAccumGradientHandler,
+    GradAccumLrSchedulerByStep,
+    GradAccumOptimizer,
+)

 __all__ = ['accumulate_gradient', 'GradAccumDataloader', 'GradAccumOptimizer', 'GradAccumLrSchedulerByStep', ...
...
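The reordered exports above are ColossalAI's gradient accumulation wrappers (GradAccumDataloader, GradAccumOptimizer, GradAccumLrSchedulerByStep, GradAccumGradientHandler). For reference, here is a minimal plain-PyTorch sketch of the technique those wrappers package up; the model, data, and accum_steps below are illustrative stand-ins, not part of the ColossalAI API.

import torch

# Toy stand-ins for illustration only.
model = torch.nn.Linear(16, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = [(torch.randn(8, 16), torch.randn(8, 4)) for _ in range(8)]
accum_steps = 4    # gradients of 4 micro-batches are summed before each update

for step, (data, target) in enumerate(dataloader):
    loss = torch.nn.functional.mse_loss(model(data), target)
    # Scale each micro-batch loss so the summed gradient matches the
    # gradient of one large batch of size 8 * accum_steps.
    (loss / accum_steps).backward()
    if (step + 1) % accum_steps == 0:
        optimizer.step()        # apply the accumulated gradient
        optimizer.zero_grad()   # clear it for the next accumulation window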
colossalai/engine/gradient_handler/_base_gradient_handler.py

...
@@ -5,7 +5,7 @@ from abc import ABC, abstractmethod
 class BaseGradientHandler(ABC):
-    """A basic helper class to handle all-reduce operations of gradients across different parallel groups
+    """A basic helper class to handle all-reduce operations of gradients across different parallel groups
     before optimization.

     Args:
...

(The removed and added lines are identical except for trailing whitespace, which the lint run stripped; the same whitespace-only docstring fixes recur in the handler files below.)
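For context, the class touched here is the abstract base that every handler in this commit subclasses: it stores the model and optimizer and leaves a handle_gradient() hook for subclasses to synchronize gradients after backward. The sketch below restates that shape with a naive, unbucketed all-reduce; the subclass name is hypothetical and the constructor signature is inferred from the base class, so treat it as an illustration rather than the library's code.

from abc import ABC, abstractmethod

import torch.distributed as dist


class BaseGradientHandler(ABC):
    """Sketch of the interface; the real one lives in this file."""

    def __init__(self, model, optimizer):
        self._model = model
        self._optimizer = optimizer

    @abstractmethod
    def handle_gradient(self):
        """Synchronize gradients across the relevant parallel group."""


class NaiveAllReduceGradientHandler(BaseGradientHandler):
    """Hypothetical subclass: all-reduce each gradient tensor separately."""

    def handle_gradient(self):
        world_size = dist.get_world_size()
        for param in self._model.parameters():
            if param.grad is not None:
                dist.all_reduce(param.grad)    # sum across ranks
                param.grad.div_(world_size)    # then average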
colossalai/engine/gradient_handler/_data_parallel_gradient_handler.py

 from colossalai.core import global_context as gpc
 from colossalai.registry import GRADIENT_HANDLER
+
-from ._base_gradient_handler import BaseGradientHandler
 from ...context.parallel_mode import ParallelMode
+from ._base_gradient_handler import BaseGradientHandler
 from .utils import bucket_allreduce


 @GRADIENT_HANDLER.register_module
 class DataParallelGradientHandler(BaseGradientHandler):
     """A helper class to handle all-reduce operations in a data parallel group.
-    A all-reduce collective communication will be operated in
+    A all-reduce collective communication will be operated in
     :func:`handle_gradient` among a data parallel group.
-    For better performance, it bucketizes the gradients of all parameters that are
+    For better performance, it bucketizes the gradients of all parameters that are
     the same type to improve the efficiency of communication.

     Args:
...
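The docstring above describes the bucketing trick: instead of one all-reduce per parameter, gradients of the same type are flattened into a single buffer, so each parallel group issues far fewer collective calls. Below is a rough sketch of what the imported bucket_allreduce utility does, using the same torch._utils flatteners the pipeline handler imports further down; the function name and the dtype-keyed grouping are illustrative assumptions, not the utility's exact implementation.

from collections import defaultdict

import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors


def bucket_allreduce_sketch(params, group=None):
    # Group gradients by dtype so each flat buffer is homogeneous.
    buckets = defaultdict(list)
    for p in params:
        if p.requires_grad and p.grad is not None:
            buckets[p.grad.dtype].append(p.grad.data)

    world_size = dist.get_world_size(group=group)
    for grads in buckets.values():
        flat = _flatten_dense_tensors(grads)    # one buffer per bucket
        dist.all_reduce(flat, group=group)      # one collective per bucket
        flat.div_(world_size)
        # Scatter the averaged values back into the original grad tensors.
        for g, synced in zip(grads, _unflatten_dense_tensors(flat, grads)):
            g.copy_(synced)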
colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py

...
@@ -4,9 +4,10 @@ from collections import defaultdict

 import torch
 import torch.distributed as dist
+from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
+
 from colossalai.core import global_context as gpc
 from colossalai.registry import GRADIENT_HANDLER
-from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
 from ._base_gradient_handler import BaseGradientHandler
...
@@ -14,9 +15,9 @@ from ._base_gradient_handler import BaseGradientHandler
 @GRADIENT_HANDLER.register_module
 class PipelineSharedModuleGradientHandler(BaseGradientHandler):
     """A helper class to handle all-reduce operations in sub parallel groups.
-    A all-reduce collective communication will be operated in
+    A all-reduce collective communication will be operated in
     :func:`handle_gradient` among all sub pipeline parallel groups.
-    For better performance, it bucketizes the gradients of all parameters that are
+    For better performance, it bucketizes the gradients of all parameters that are
     the same type to improve the efficiency of communication.

     Args:
...
colossalai/engine/gradient_handler/_sequence_parallel_gradient_handler.py

 from colossalai.core import global_context as gpc
 from colossalai.registry import GRADIENT_HANDLER
+
-from ._base_gradient_handler import BaseGradientHandler
 from ...context.parallel_mode import ParallelMode
+from ._base_gradient_handler import BaseGradientHandler
 from .utils import bucket_allreduce


 @GRADIENT_HANDLER.register_module
 class SequenceParallelGradientHandler(BaseGradientHandler):
     """A helper class to handle all-reduce operations in a data parallel group.
-    A all-reduce collective communication will be operated in
+    A all-reduce collective communication will be operated in
     :func:`handle_gradient` among a data parallel group.
-    For better performance, it bucketizes the gradients of all parameters that are
+    For better performance, it bucketizes the gradients of all parameters that are
     the same type to improve the efficiency of communication.

     Args:
...
colossalai/engine/gradient_handler/_zero_gradient_handler.py

 from colossalai.registry import GRADIENT_HANDLER
+
 from ._base_gradient_handler import BaseGradientHandler
...
colossalai/engine/schedule/__init__.py

 from ._base_schedule import BaseSchedule
-from ._pipeline_schedule import PipelineSchedule, InterleavedPipelineSchedule, get_tensor_shape
 from ._non_pipeline_schedule import NonPipelineSchedule
+from ._pipeline_schedule import InterleavedPipelineSchedule, PipelineSchedule, get_tensor_shape

 __all__ = ['BaseSchedule', 'NonPipelineSchedule', 'PipelineSchedule', 'InterleavedPipelineSchedule', 'get_tensor_shape']
colossalai/engine/schedule/_base_schedule.py

...
@@ -2,10 +2,10 @@
 # -*- encoding: utf-8 -*-

 from abc import ABC, abstractmethod
+from typing import Callable, Iterable

 import torch
-from typing import Iterable, Callable

 from colossalai.logging import get_dist_logger
 from colossalai.utils import get_current_device
...
colossalai/engine/schedule/_non_pipeline_schedule.py

 #!/usr/bin/env python
 # -*- encoding: utf-8 -*-

-from typing import Iterable
+import inspect
+from typing import Callable, Iterable
+
 import torch
-import inspect
-from ._base_schedule import BaseSchedule
 from colossalai.utils import conditional_context
-from typing import Callable
+
+from ._base_schedule import BaseSchedule


 class NonPipelineSchedule(BaseSchedule):
...
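NonPipelineSchedule is the degenerate schedule: with no pipeline stages there is nothing to interleave, so one step is simply forward, loss, and (unless running evaluation) backward. A minimal sketch of that control flow follows; the real method hangs off the class and takes an engine rather than a bare model, and it uses the conditional_context helper imported above where this sketch uses torch.set_grad_enabled.

import torch


def forward_backward_step_sketch(model, criterion, data_iter, forward_only=False):
    """Illustrative stand-in for NonPipelineSchedule's training step."""
    data, label = next(data_iter)
    # No pipeline stages: the whole model runs in one forward pass.
    with torch.set_grad_enabled(not forward_only):
        output = model(data)
        loss = criterion(output, label)
    if not forward_only:
        loss.backward()    # gradient handlers run after this, before optimizer.step()
    return output, label, loss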