import pytest
from functools import partial

import numpy as np
import random

import torch
import torch.multiprocessing as mp

import colossalai
from colossalai.utils import free_port
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.tensor import ColoParameter, ProcessGroup, ShardSpec, ComputePattern, ComputeSpec, \
    ColoTensor, ColoTensorSpec
from colossalai.nn.parallel.layers import CachedParamMgr, FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag, EvictionStrategy

NUM_EMBED, EMBED_DIM = 10, 8
BATCH_SIZE = 8


def set_seed(seed):
    """
    To achieve reproducible results, it's necessary to fix random seeds
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)


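# `synthesize_1d_sparse_feature` below produces inputs in torch.nn.EmbeddingBag's
# 1D format: `indices` concatenates the ids of all bags and `offsets` marks where
# each bag starts. With include_last_offset=True the final offset equals
# len(indices), e.g. indices=[4, 1, 7, 2] with offsets=[0, 2, 4] encodes the two
# bags [4, 1] and [7, 2].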
def synthesize_1d_sparse_feature(
    batch_size,
    num_embed,
    device,
):
    indices_in_batch = batch_size * 2
    indices = torch.randint(low=0, high=num_embed, size=(indices_in_batch,), device=device, dtype=torch.long)
    offsets = torch.from_numpy(
        np.array([
            0, *np.sort(np.random.randint(low=0, high=indices_in_batch, size=(indices_in_batch - 1,))), indices_in_batch
        ])).to(device).long()
    return indices, offsets


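# test_cachemgr exercises the low-level CachedParamMgr API directly: rows of a
# CPU-resident weight are admitted to and evicted from a small CUDA cache
# (at most 5 cached rows here), and after flush() all CUDA slots are expected to
# be free again. It relies on private helpers (_admit, _evict,
# _prepare_rows_on_cuda) and is currently skipped.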
@pytest.mark.skip
def test_cachemgr():
    model = torch.nn.EmbeddingBag(10000, 128)
    # 10 chunks, 5 in cuda
    mgr = CachedParamMgr(model.weight.detach(), 5)
    assert mgr.cuda_row_num == 5

    mgr._admit(1)
    assert not mgr._chunk_in_cuda(2)
    assert mgr._chunk_in_cuda(1)

    # print(mgr.cached_chunk_table)
    mgr._admit(8)

    # now 3 chunks are available
    assert mgr.cuda_available_chunk_num == 3

    mgr._evict()
    assert mgr.cuda_available_chunk_num == 4

    mgr._prepare_rows_on_cuda(torch.tensor([9, 6, 5], dtype=torch.long, device=0))
    mgr._prepare_rows_on_cuda(torch.tensor([3, 4, 5], dtype=torch.long, device=0))
    # print(mgr.cached_chunk_table)
    # mgr.print_comm_stats()

    mgr.flush()
    assert mgr.cuda_available_chunk_num == 5


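# test_reorder_with_freq checks that CachedParamMgr.reorder maps ids with higher
# access frequency (idx_map) to the front of the managed weight, so the expected
# chunk id and in-chunk offset of each id can be derived from a descending
# argsort of the frequencies.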
def test_reorder_with_freq():
    num_embed = 100
    chunk_size = 1
    num_chunk = 5

    idx_map = torch.randint(10000, size=(num_embed,))
    sorted_idx = torch.argsort(idx_map, descending=True).tolist()
    chunkid, offset_in_chunk = [], []
    for i in range(num_embed):
        idx = sorted_idx.index(i)
        chunkid.append(idx // chunk_size)
        offset_in_chunk.append(idx % chunk_size)

    chunkid = torch.tensor(chunkid, dtype=torch.long, device=torch.cuda.current_device())
    offset_in_chunk = torch.tensor(offset_in_chunk, dtype=torch.long, device=torch.cuda.current_device())

    weight = torch.rand(num_embed, 2)
    mgr = CachedParamMgr(weight, num_chunk)

    mgr.reorder(idx_map)

    indices = mgr.idx_map.index_select(0, torch.arange(num_embed, dtype=torch.long, device=torch.cuda.current_device()))
    mgr_chunk_id = torch.div(indices, chunk_size, rounding_mode='floor')
    mgr_offsets = torch.remainder(indices, chunk_size)
    assert torch.allclose(chunkid, mgr_chunk_id), f"chunk id: {chunkid}, mgr: {mgr_chunk_id}"
    assert torch.allclose(offset_in_chunk, mgr_offsets), \
        f"offset in chunk: {offset_in_chunk}, mgr: {mgr_offsets}"


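# test_freq_aware_embed compares FreqAwareEmbeddingBag against a dense
# torch.nn.EmbeddingBag initialized from the same weight: forward results,
# several SGD steps, and the flushed weights should all match.
# A minimal usage sketch (mirroring the arguments used below):
#
#   bag = FreqAwareEmbeddingBag(NUM_EMBED, EMBED_DIM, mode='mean',
#                               include_last_offset=True,
#                               cuda_row_num=BATCH_SIZE * 2).to(device)
#   out = bag(indices, offsets)   # same call convention as torch.nn.EmbeddingBag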
@pytest.mark.parametrize('use_LFU', [True, False])
def test_freq_aware_embed(use_LFU: bool):
    device = torch.device('cuda', 0)
    evict_strategy = EvictionStrategy.LFU if use_LFU else EvictionStrategy.DATASET
    model = FreqAwareEmbeddingBag(NUM_EMBED,
                                  EMBED_DIM,
                                  mode='mean',
                                  include_last_offset=True,
                                  cuda_row_num=BATCH_SIZE * 2,
                                  ids_freq_mapping=None,
                                  evict_strategy=evict_strategy).to(device)

    assert model.weight.shape[0] == NUM_EMBED
    ref_model = torch.nn.EmbeddingBag.from_pretrained(model.weight.detach().to(device),
                                                      mode='mean',
                                                      include_last_offset=True,
                                                      freeze=False)

    assert torch.allclose(ref_model.weight.detach(), model.weight.detach().to(device))

    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    ref_optimizer = torch.optim.SGD(ref_model.parameters(), lr=1e-3)

    for i in range(5):
        indices, offsets = synthesize_1d_sparse_feature(BATCH_SIZE, NUM_EMBED, device)
        res = model(indices, offsets)
        ref_res = ref_model(indices, offsets)
        assert torch.allclose(res, ref_res), f"model result: {res}, reference: {ref_res}"

        grad = torch.rand_like(res)
        # comparing gradient here is nontrivial
        res.backward(grad)
        ref_res.backward(grad)
        optimizer.step()
        optimizer.zero_grad()

        ref_optimizer.step()
        ref_optimizer.zero_grad()

    model.cache_weight_mgr.flush()
    model_weight = model.weight.detach().to(device)
    ref_weight = ref_model.weight.detach()
    assert torch.allclose(model_weight, ref_weight), \
        f"model weight: {model_weight[10:18, :8]}, reference: {ref_weight[10:18, :8]}"

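# test_lfu_strategy uses a 5-row embedding with only 3 CUDA cache slots, warms up
# the LFU counters so that id 0 is the most frequent and id 1 the least, then
# checks via num_hits_history that a new id evicts the least-frequently-used row
# (id 1 first, then id 3) while the hot rows stay cached.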
def test_lfu_strategy():
    # minimal test to check behavior
    Bag = FreqAwareEmbeddingBag(
        5,
        5,
        cuda_row_num=3,
        buffer_size=0,
        pin_weight=True,
        warmup_ratio=0.0,
        evict_strategy=EvictionStrategy.LFU
    )

    offsets = torch.tensor([0],device="cuda:0")

    # prepare frequency learning info:
    Bag.forward(torch.tensor([2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([1,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,1,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,1,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,1,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,1,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0],device="cuda:0"),offsets)

    # check strategy
    Bag.forward(torch.tensor([0,1,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([0,1,2],device="cuda:0"),offsets)
    Bag.forward(torch.tensor([3],device="cuda:0"),offsets) # miss, evict 1
    Bag.forward(torch.tensor([2],device="cuda:0"),offsets) # hit
    Bag.forward(torch.tensor([4],device="cuda:0"),offsets) # miss, evict 3
    Bag.forward(torch.tensor([2],device="cuda:0"),offsets) # hit
    Bag.forward(torch.tensor([0],device="cuda:0"),offsets) # hit

    assert torch.allclose(torch.Tensor(Bag.cache_weight_mgr.num_hits_history[-6:]), torch.Tensor([3, 0, 1, 0, 1, 1])), \
        "LFU strategy behavior failed"
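# gather_tensor collects `tensor` from every rank onto rank 0 via
# torch.distributed.gather; non-destination ranks get back an empty list.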
def gather_tensor(tensor, rank, world_size):
    gather_list = []
    if rank == 0:
        gather_list = [torch.empty_like(tensor) for _ in range(world_size)]

    torch.distributed.gather(tensor, gather_list, dst=0)
    return gather_list


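# run_parallel_freq_aware_embed shards the embedding weight column-wise across
# the tensor-parallel group (ShardSpec on the last dim), so each rank's
# cache_weight_mgr holds embed_dim / world_size columns. Outputs and flushed
# weights are gathered back to rank 0 and compared against a full dense
# torch.nn.EmbeddingBag reference.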
def run_parallel_freq_aware_embed(rank, world_size):
    device = torch.device('cuda', torch.cuda.current_device())

    num_embed = 100
    embed_dim = 16
    batch_size = 4

    set_seed(4321)
    weight = torch.rand(num_embed, embed_dim)
    coloweight = ColoTensor(weight.clone().detach().cpu(), spec=None)

    # initialize the tensor spec for the embedding weight parameter,
    # which is a ColoParameter.
    coloweight.set_process_group(ProcessGroup(tp_degree=world_size))
    coloweight.set_tensor_spec(ShardSpec(dims=[-1], num_partitions=[world_size]), ComputeSpec(ComputePattern.TP1D))

    model = ParallelFreqAwareEmbeddingBag.from_pretrained(coloweight,
                                                          include_last_offset=True,
                                                          freeze=False,
                                                          cuda_row_num=batch_size * 2)

    assert model.cache_weight_mgr.weight.device.type == 'cpu'
    assert model.cache_weight_mgr.cuda_cached_weight.requires_grad
    weight_in_rank = torch.tensor_split(weight, world_size, -1)[rank]
    print(f"model weight: {model.cache_weight_mgr.weight.shape}, ref weight: {weight_in_rank.shape}")
    assert torch.allclose(weight_in_rank,
                          model.cache_weight_mgr.weight.detach()), f"{weight_in_rank - model.cache_weight_mgr.weight}"

    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

    if rank == 0:
        ref_model = torch.nn.EmbeddingBag.from_pretrained(weight.detach().clone(),
                                                          include_last_offset=True,
                                                          freeze=False).to(device)
        ref_optimizer = torch.optim.SGD(ref_model.parameters(), lr=1e-3)

    set_seed(4321)
    for i in range(5):
        indices, offsets = synthesize_1d_sparse_feature(batch_size, num_embed, device)
        res = model(indices, offsets)

        grad = torch.rand(batch_size * 2, embed_dim, dtype=res.dtype, device=res.device)
        grad_in_rank = torch.tensor_split(grad, world_size, 0)[rank]
        res.backward(grad_in_rank)

        optimizer.step()
        optimizer.zero_grad()

        res_list = gather_tensor(res.detach(), rank, world_size)

        if rank == 0:
            ref_res = ref_model(indices, offsets)
            recover_res = torch.cat(res_list, dim=0)

            assert torch.allclose(ref_res, recover_res)

            ref_res.backward(grad)
            ref_optimizer.step()
            ref_optimizer.zero_grad()

    model.cache_weight_mgr.flush()
    weight_list = gather_tensor(model.cache_weight_mgr.weight.detach().cuda(), rank, world_size)
    if rank == 0:
        recover_weight = torch.cat(weight_list, dim=1)
        assert torch.allclose(recover_weight, ref_model.weight.detach()), f"{recover_weight - ref_model.weight}"


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_parallel_freq_aware_embed(rank, world_size)


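# The distributed test spawns one process per rank with torch.multiprocessing and
# lets each process initialize the NCCL process group through colossalai.launch
# (see run_dist above) before running the parallel embedding check.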
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_parallel_freq_aware_embed(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    # test_freq_aware_embed(True)
    # test_parallel_freq_aware_embed(2)
    test_lfu_strategy()