Unverified Commit 09d69e1c authored by Ziyue Jiang, committed by GitHub

[PP Middleware] Add bwd and step for PP middleware (#2111)



* add bwd and step for PP middleware

* pre-commit
Co-authored-by: Ziyue Jiang <ziyue.jiang@gmail.com>
parent 8afc001f
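
The new test below drives the 1F1B pipeline engine through the full forward, backward, and optimizer-step path instead of forward only. A minimal sketch of that flow, assuming the RPC workers are already launched and that partition, model, data_kwargs, and the hyperparameters (stage_num, num_microbatches, batch_size, dim) are set up as in the test file below:

    from functools import partial

    import torch

    # Engine construction is unchanged; partition, model and data_kwargs follow the test below.
    engine = OneFOneBPipelineEngine(partition_fn=partial(partition, model, data_kwargs),
                                    stage_num=stage_num,
                                    num_microbatches=num_microbatches,
                                    device='cuda',
                                    chunk=1,
                                    checkpoint=False)

    # New in this commit: bind an optimizer class to the engine so every stage can step.
    engine.initialize_optimizer(getattr(torch.optim, 'SGD'), lr=1e-3)

    # forward_only=False now also runs the backward pass and the optimizer step;
    # the test uses a dummy label (1) when training and labels=None for forward-only runs.
    input_x = torch.randn((batch_size, dim), device='cuda')
    logits = engine.forward_backward({'x': input_x}, labels=1, forward_only=False)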
@@ -89,9 +89,6 @@ class OneFOneBWorker(WorkerBase):
         elif target_key.microbatch_id == num_microbatches - 1:
             self.outstanding_range = (0, 0)
-        with self.work_list_condition_lock:
-            self.work_list_condition_lock.wait_for(lambda: target_key in self.work_list)
         return target_key
...
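
For context, the removed lines used the standard threading.Condition pattern of blocking until a key shows up in a shared work list. A standalone sketch of that pattern with hypothetical names (not the middleware's actual classes):

    import threading

    work_list = {}                               # shared between producer and consumer threads
    work_list_condition = threading.Condition()

    def wait_for_key(target_key):
        # Consumer side: block until the producer has inserted target_key, then return it.
        with work_list_condition:
            work_list_condition.wait_for(lambda: target_key in work_list)
        return target_key

    def submit_work(key, item):
        # Producer side: insert the item and wake up any waiting consumers.
        with work_list_condition:
            work_list[key] = item
            work_list_condition.notify_all()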
@@ -57,7 +57,6 @@ def split_batch(batch: Any, start, stop, device: str):
 def type_detail(obj):
     return pytree_map(obj, lambda x: type(x), map_all=True)
 def pytree_filter(fn, obj, process_types):
     if obj is None:
         return None
...
@@ -31,7 +31,7 @@ class MLP(nn.Module):
     def forward(self, x):
         for layer in self.layers:
             x = layer(x)
-        return x
+        return x.sum()
 class DAG_MLP(nn.Module):
     def __init__(self, dim: int, layers: int):
@@ -46,7 +46,7 @@ class DAG_MLP(nn.Module):
         for layer in self.layers:
             x = layer(x)
         y = self.dag_layer(y)
-        return x, y
+        return x.sum(), y.sum()
 class RpcTestModel(nn.Module):
...
@@ -41,10 +41,10 @@ def partition(model, data_kwargs: dict, pp_rank: int, chunk: int, stage_num: int
     partition = create_partition_module(pp_rank, stage_num, model, data_kwargs)
     return partition
-def run_master(model_cls, world_size):
+def run_master(model_cls, world_size, forward_only):
     torch.manual_seed(100)
-    epoch = 10
+    epoch = 3
     device = 'cuda'
     stage_num = world_size
     chunk = 1
@@ -57,6 +57,10 @@ def run_master(model_cls, world_size):
             kwargs = dict(x=x)
             return kwargs
         model = model_cls(dim, stage_num * 3)
+        if forward_only:
+            labels = None
+        else:
+            labels = 1
     elif model_cls == DAG_MLP:
         def data_gen():
             x = torch.zeros((batch_size, dim))
@@ -64,24 +68,30 @@ def run_master(model_cls, world_size):
             kwargs = dict(x=x, y=y)
             return kwargs
         model = model_cls(dim, stage_num * 3)
+        if forward_only:
+            labels = None
+        else:
+            labels = 1
     else:
         pass
     data_kwargs = data_gen()
     engine = OneFOneBPipelineEngine(partition_fn=partial(partition, model, data_kwargs),
                                     stage_num=stage_num,
                                     num_microbatches=num_microbatches,
                                     device=device,
                                     chunk=chunk,
                                     checkpoint=use_checkpoint,)
+    if not forward_only:
+        engine.initialize_optimizer(getattr(torch.optim, 'SGD'), lr=1e-3)
     for _ in range(epoch):
         input_x = torch.randn((batch_size, dim), device=device)
         input_y = torch.randn((batch_size, dim), device=device)
-        logits = engine.forward_backward({'x': input_x, 'y': input_y}, forward_only=True)
+        logits = engine.forward_backward({'x': input_x, 'y': input_y}, labels=labels, forward_only=forward_only)
-def run_worker(rank, model_cls, world_size, master_func):
+def run_worker(rank, model_cls, world_size, forward_only, master_func):
     master_addr = 'localhost'
     master_port = 29020
     os.environ['MASTER_ADDR'] = master_addr
@@ -99,19 +109,20 @@ def run_worker(rank, model_cls, world_size, master_func):
     # in rpc mode, only rank 0 is needed to be coded
     if rank == 0:
-        master_func(model_cls, world_size)
+        master_func(model_cls, world_size, forward_only)
     # barrier here
     if rpc_is_initialized():
         rpc.shutdown()
 @pytest.mark.skip("skip due to CI torch version 1.11")
 @parameterize('model_cls', [MLP, DAG_MLP])
+@parameterize('forward_only', [True, False])
 @pytest.mark.dist
 @rerun_if_address_is_in_use()
-def test_pp_middleware_fwd(model_cls):
+def test_pp_middleware_fwd(model_cls, forward_only):
     world_size = 4
     master_func = run_master
-    mp.spawn(run_worker, args=(model_cls, world_size, master_func), nprocs=world_size)
+    mp.spawn(run_worker, args=(model_cls, world_size, forward_only, master_func), nprocs=world_size)
 if __name__ == "__main__":
     test_pp_middleware_fwd()
\ No newline at end of file