Unverified Commit b0936e4a authored by Ziyue Jiang, committed by GitHub

[rpc] split with dag (#2028)



* add DAG to split_module

* add comment

* add test case for DAG

* remove print

* add DAG middleware in scheduler

* add test case for scheduler

* remove break

* recover old lifecycle
Co-authored-by: Ziyue Jiang <ziyue.jiang@gmail.com>
parent 96134e7b
@@ -117,7 +117,7 @@ def uniform_split_pass(gm: torch.fx.GraphModule, pp_size: int):
     return gm


-def split_with_split_nodes_pass(annotated_gm: torch.fx.GraphModule):
+def split_with_split_nodes_pass(annotated_gm: torch.fx.GraphModule, merge_output=False):
     # TODO(lyl): use partition IR to assign partition ID to each node.
     # Currently: analyzing graph -> annotate graph by inserting split node -> use split module pass to split graph
     # In future: graph to partitions -> analyzing partition IR -> recombining partitions to get best performance -> assign partition ID to each node
@@ -129,7 +129,7 @@ def split_with_split_nodes_pass(annotated_gm: torch.fx.GraphModule):
         part_idx += 1
         return part_idx

-    split_mod = split_module(annotated_gm, None, split_callback)
+    split_mod = split_module(annotated_gm, None, split_callback, merge_output)
     split_submodules = []
     for name, submodule in split_mod.named_modules():
         if isinstance(submodule, torch.fx.GraphModule):
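
For reference, a minimal sketch of the new call path. Only balanced_split_pass, split_with_split_nodes_pass, and ColoTracer are the real APIs touched by this diff; the Toy module and shapes are hypothetical:

# Sketch only, not part of the diff.
import torch
from torch import nn
from colossalai.fx import ColoTracer
from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass

class Toy(nn.Module):

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(16, 16)
        self.fc2 = nn.Linear(16, 16)

    def forward(self, x):
        return self.fc2(self.fc1(x))

model = Toy()
graph = ColoTracer().trace(root=model, meta_args={'x': torch.zeros(4, 16, device='meta')})
gm = torch.fx.GraphModule(model, graph, model.__class__.__name__)
annotated = balanced_split_pass(gm, 2)    # annotate the graph for 2 pipeline stages
split_mod, split_submodules = split_with_split_nodes_pass(annotated, merge_output=True)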
@@ -199,24 +199,17 @@ def find_user_in_partition(node, partitions, output_partitions=None, direct=False):
         for partition in partitions:
             if node == partition:
                 user_partition_names.append(partition.name)
     # find user with getitem call
     else:
         for partition in partitions:
             if node in partition.args:
                 user_partition_names.append(partition.name)

-    is_output = False
-
-    def find_output(def_node, output_node):
-        nonlocal is_output
-        if def_node == output_node:
-            is_output = True

     if output_partitions is not None:
         output_node = output_partitions[0]
-        torch.fx.graph.map_arg(output_node.args[0], lambda n: find_output(node, n))
-        if is_output:
-            user_partition_names.append('MODEL_OUTPUT')
+        if node.op == output_node.op:
+            user_partition_names.append('MODEL_OUTPUT')

     if len(user_partition_names) > 0:
         return user_partition_names
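
The deleted find_output closure walked the output node's arguments with torch.fx.graph.map_arg to decide whether node feeds the model output; the replacement simply compares node.op against the output node's op. For intuition, a standalone torch.fx sketch (illustrative, independent of this repo): every traced graph has exactly one node whose op is 'output'.

import torch.fx

def f(x):
    return x + 1

gm = torch.fx.symbolic_trace(f)
for node in gm.graph.nodes:
    # prints: placeholder x / call_function add / output output
    print(node.op, node.name)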
@@ -20,6 +20,18 @@ def color_debug(text, prefix=' ', color='blue'):
     color = color.upper()
     print(getattr(Back, color), prefix, Style.RESET_ALL, text)

+class MLP(nn.Module):
+
+    def __init__(self, dim: int, layers: int):
+        super().__init__()
+        self.layers = torch.nn.ModuleList()
+        for _ in range(layers):
+            self.layers.append(nn.Linear(dim, dim, bias=False))
+
+    def forward(self, x):
+        for layer in self.layers:
+            x = layer(x)
+        return x
+

 class RpcTestModel(nn.Module):
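
A quick sanity check of the helper added above (a sketch, not part of the commit; assumes the torch and nn imports already present in rpc_test_utils):

mlp = MLP(dim=10, layers=6)      # layers == stage_num * 3 for stage_num == 2, as in the test below
out = mlp(torch.randn(4, 10))
assert out.shape == (4, 10)      # bias-free Linear layers preserve the feature dim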
import torch
from torch import nn
from colossalai.pipeline.rpc._pipeline_schedule import OneFOneBPipelineEngine
from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass
from colossalai.fx import ColoTracer
from rpc_test_utils import rpc_run, parse_args, MLP
from functools import partial

# global variables for model creation
batch_size = 16
dim = 10


def create_partition_module(pp_rank: int, stage_num: int, model, data_kwargs):
    model.eval()
    tracer = ColoTracer()
    meta_args = {k: v.to('meta') for k, v in data_kwargs.items()}
    graph = tracer.trace(root=model, meta_args=meta_args)
    gm = torch.fx.GraphModule(model, graph, model.__class__.__name__)
    annotated_model = balanced_split_pass(gm, stage_num)
    split_model, _ = split_with_split_nodes_pass(annotated_model, merge_output=True)
    return list(split_model.children())[pp_rank]


def partition(data_kwargs: dict, pp_rank: int, chunk: int, stage_num: int):
    torch.manual_seed(1024)
    model = MLP(dim, stage_num * 3)
    partition = create_partition_module(pp_rank, stage_num, model, data_kwargs)
    return partition


def run_master(args):
    torch.manual_seed(100)

    epoch = args.epoch
    device = args.device
    stage_num = args.world_size
    chunk = args.chunk
    num_microbatches = args.num_microbatches
    use_checkpoint = args.use_checkpoint

    input_sample = torch.randn((batch_size, dim), device=device)

    def data_gen():
        x = torch.zeros((batch_size, dim))
        kwargs = dict(x=x)
        return kwargs

    data_kwargs = data_gen()

    engine = OneFOneBPipelineEngine(partition_fn=partial(partition, data_kwargs),
                                    stage_num=stage_num,
                                    num_microbatches=num_microbatches,
                                    device=device,
                                    chunk=chunk,
                                    checkpoint=use_checkpoint)

    for _ in range(epoch):
        logits = engine.forward_backward({'x': input_sample}, forward_only=True)


if __name__ == "__main__":
    args = parse_args()
    rpc_run(args, run_master)
\ No newline at end of file
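
Note on the partial above (illustrative): functools.partial pre-binds data_kwargs, so the engine presumably only supplies the per-stage arguments when it instantiates each stage.

fn = partial(partition, {'x': torch.zeros(batch_size, dim)})
stage_module = fn(0, 1, 2)    # pp_rank=0, chunk=1, stage_num=2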