OpenDAS / ColossalAI · Commits

Commit 85f933b5 (Unverified)
Authored Jul 14, 2022 by Jiarui Fang; committed by GitHub on Jul 14, 2022

[Optimizer] Remove useless ColoOptimizer (#1312)

Parent: c9c37dcc

Changes: 5 changed files with 8 additions and 90 deletions
colossalai/nn/optimizer/__init__.py         +1   -3
colossalai/nn/optimizer/colo_optimizer.py   +0  -80
colossalai/tensor/colo_parameter.py         +0   -1
tests/test_tensor/test_model.py             +5   -4
tests/test_utils/test_colo_checkpoint.py    +2   -2
colossalai/nn/optimizer/__init__.py

@@ -7,9 +7,7 @@ from .lamb import Lamb
 from .lars import Lars
 from .cpu_adam import CPUAdam
 from .hybrid_adam import HybridAdam
-from .colo_optimizer import ColoOptimizer

 __all__ = [
-    'ColossalaiOptimizer', 'FusedLAMB', 'FusedAdam', 'FusedSGD', 'Lamb', 'Lars', 'CPUAdam', 'HybridAdam',
-    'CPU_ADAM_CNT', 'ColoOptimizer'
+    'ColossalaiOptimizer', 'FusedLAMB', 'FusedAdam', 'FusedSGD', 'Lamb', 'Lars', 'CPUAdam', 'HybridAdam', 'CPU_ADAM_CNT'
 ]
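With the import and the __all__ entry removed, downstream code that did "from colossalai.nn.optimizer import ColoOptimizer" will now fail with an ImportError. A minimal sketch of the import surface after this commit, using only names that remain exported in the updated __all__ above:

# Still importable after this commit (per the updated __all__):
from colossalai.nn.optimizer import ColossalaiOptimizer, HybridAdam

# No longer available; this would now raise ImportError:
# from colossalai.nn.optimizer import ColoOptimizer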
colossalai/nn/optimizer/colo_optimizer.py
deleted 100644 → 0

from typing import List, Union, Mapping, Dict, Any

import torch.optim as optim
from torch import Tensor

from colossalai.tensor.colo_tensor import ColoTensor


class ColoOptimizer(optim.Optimizer):

    def __init__(self, named_params: Mapping[str, Union[Tensor, ColoTensor]], optimizer_class, *optimizer_args,
                 **optimizer_kwargs):
        """
        ColoOptimizer collects all tensors of type ColoTensor and torch.Tensor,
        then uses these tensors as ``params`` for the wrapped optimizer.

        Args:
            named_params (Dict[str, Union[Tensor, ColoTensor]]): a dict of parameters,
                where the key is the parameter name and the value is either a Tensor
                or a ColoTensor. This is usually used in conjunction with
                model.named_parameters(), the same as in PyTorch.
            optimizer_class (torch.optim.Optimizer): the optimizer to use locally,
                e.g. torch.optim.SGD, torch.optim.Adagrad, etc.
            *optimizer_args: the arguments to initialize the optimizer.
            **optimizer_kwargs: the keyword arguments to initialize the optimizer.
        """
        # named_params is expected to yield (name, parameter) pairs, as
        # model.named_parameters() does; only the parameters are passed on.
        self._optim = optimizer_class([p for n, p in named_params], *optimizer_args, **optimizer_kwargs)
        self.param_groups = self._optim.param_groups
        self.state = self._optim.state

    def zero_grad(self, set_to_none: bool = False):    # type: ignore[override]
        r"""Sets the gradients of all optimized :class:`torch.Tensor` s to zero.

        Args:
            set_to_none (bool): instead of setting to zero, set the grads to None.
                This will in general have a lower memory footprint, and can modestly
                improve performance. However, it changes certain behaviors. For example:
                1. When the user tries to access a gradient and perform manual ops on it,
                a None attribute or a Tensor full of 0s will behave differently.
                2. If the user requests ``zero_grad(set_to_none=True)`` followed by a
                backward pass, ``.grad``\ s are guaranteed to be None for params that
                did not receive a gradient.
                3. ``torch.optim`` optimizers behave differently if the gradient is 0 or
                None (in one case the step is taken with a gradient of 0, in the other
                the step is skipped altogether).
        """
        self._optim.zero_grad(set_to_none)

    def step(self, closure=None):
        r"""Performs a single optimization step (parameter update).

        Args:
            closure (callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.

        .. note::
            Unless otherwise specified, this function should not modify the
            ``.grad`` field of the parameters.
        """
        self._optim.step(closure)

    def state_dict(self) -> Dict[str, Any]:
        """
        The returned state and param_groups will contain parameter keys
        instead of parameter indices, unlike torch.optim.Optimizer.
        """
        return self._optim.state_dict()

    def load_state_dict(self, state_dict: Mapping[str, Any]):
        r"""Loads the ColoOptimizer state.

        Args:
            state_dict (dict): ColoOptimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self._optim.load_state_dict(state_dict)

    def add_param_group(self, param_group: Any):
        r"""Add a new param group."""
        self._optim.add_param_group(param_group)
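ColoOptimizer was a thin delegating wrapper: it constructed the underlying optimizer from (name, parameter) pairs and forwarded zero_grad, step, state_dict, load_state_dict, and add_param_group to it. The test updates below replace it with ColossalaiOptimizer, which wraps an already-constructed torch.optim optimizer instead. A minimal before/after sketch of the migration, assuming model is any torch.nn.Module:

import torch
from colossalai.nn.optimizer import ColossalaiOptimizer

# Before this commit (removed): named parameters plus an optimizer class.
# optimizer = ColoOptimizer(model.named_parameters(), torch.optim.SGD, lr=0.1)

# After this commit: build the torch optimizer yourself and wrap it.
optimizer = ColossalaiOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))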
colossalai/tensor/colo_parameter.py

import torch
from typing import Optional
from copy import copy

from colossalai.tensor.colo_tensor import ColoTensor
from colossalai.tensor.const import TensorType
...
tests/test_tensor/test_model.py

@@ -12,7 +12,7 @@ from colossalai.utils.cuda import get_current_device
 from colossalai.utils import free_port
 from colossalai.utils.model.colo_init_context import ColoInitContext
 from colossalai.tensor import ColoTensor, ProcessGroup
-from colossalai.nn.optimizer import ColoOptimizer
+from colossalai.nn.optimizer import ColossalaiOptimizer
 from tests.components_to_test.registry import non_distributed_component_funcs
 from _utils import split_param_row_tp1d, split_param_col_tp1d
@@ -33,7 +33,8 @@ def run_1d_hybrid_tp(model_name):
     if rank == 0:
         model_torch = model_builder(checkpoint=True)
         model_torch = model_torch.cuda()
-        optimizer_torch = ColoOptimizer(model_torch.named_parameters(), torch.optim.SGD, lr=0.1)
+        optimizer_torch = ColossalaiOptimizer(torch.optim.SGD(model_torch.parameters(), lr=0.1))
+
         # Make two models have the same init params
         for p1, p2 in zip(model.parameters(), model_torch.parameters()):
@@ -80,7 +81,7 @@ def run_1d_hybrid_tp(model_name):
     if rank == 0:
         model_torch.train()
-        colo_optimizer = ColoOptimizer(model.named_parameters(), torch.optim.SGD, lr=0.1)
+        colo_optimizer = ColossalaiOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))
     for i, (data, label) in enumerate(train_dataloader):
@@ -170,7 +171,7 @@ def test_colo_optimizer():
     with ColoInitContext(lazy_memory_allocate=False, device=get_current_device()):
         model = model_builder(checkpoint=True)
-    colo_optimizer = ColoOptimizer(model.named_parameters(), torch.optim.SGD, lr=0.1)
+    colo_optimizer = ColossalaiOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))
     for i, (data, label) in enumerate(train_dataloader):
         colo_optimizer.zero_grad()
         data = data.to(get_current_device())
tests/test_utils/test_colo_checkpoint.py

@@ -18,7 +18,7 @@ from colossalai.utils.model.colo_init_context import ColoInitContext
 from colossalai.tensor import ComputePattern, ComputeSpec, ColoTensor, ShardSpec, ProcessGroup, DistSpecManager, ReplicaSpec
 from colossalai.nn.parallel.data_parallel import ColoDDP
 from colossalai.utils.checkpoint import save_checkpoint, load_checkpoint
-from colossalai.nn.optimizer import ColoOptimizer
+from colossalai.nn.optimizer import ColossalaiOptimizer
 from tests.components_to_test.registry import non_distributed_component_funcs
@@ -117,7 +117,7 @@ def _run_checkpoint(model_name, init_spec_func, use_ddp, use_mp_reload, test_sch
     model_reload = model_reload.cuda()
     model_reload.train()
-    colo_optimizer = ColoOptimizer(model.named_parameters(), torch.optim.SGD, lr=0.1)
+    colo_optimizer = ColossalaiOptimizer(torch.optim.SGD(model.named_parameters(), r=0.1))
     for i, (data, label) in enumerate(train_dataloader):
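Note on the hunk above: as rendered in the diff, the replacement line passes model.named_parameters() and r=0.1 to torch.optim.SGD. torch.optim.SGD expects an iterable of parameters and an lr keyword argument, so the line as committed would raise a TypeError if executed. A working form would presumably look like the sketch below; this is our assumption, not part of the commit:

# Hypothetical corrected call (not what the commit shows): torch.optim.SGD has no
# `r` keyword, and it takes parameters rather than the (name, parameter) pairs
# yielded by model.named_parameters().
colo_optimizer = ColossalaiOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))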