#include "jiuge_impl.hpp"
#include "jiuge_weight.hpp"

#include "../../tensor.hpp"
#include "../../utils.hpp"
#include "../inference_context.hpp"
#include "infinicore_infer.h"

#include <algorithm>
#include <cmath>
#include <mutex>
#include <random>
#include <thread>
#include <vector>

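// Allocate the per-device resources for one model shard: an infiniop handle,
// a compute stream, a device memory pool, and this rank's slice of every
// transformer layer's weights (attention and FFN projections are split
// across the ndev ranks).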
void createDeviceResource(JiugeDeviceResource *rsrc, const JiugeMeta *meta,
                          const JiugeWeights *weights,
                          infiniDevice_t device, int idev,
                          int ndev, int dev_id,
                          infinicclComm_t comm) {
    RUN_INFINI(infinirtSetDevice(device, dev_id));
    infiniopHandle_t handle;
    RUN_INFINI(infiniopCreateHandle(&handle));
    infinirtStream_t stream;
    RUN_INFINI(infinirtStreamCreate(&stream));

    std::vector<std::shared_ptr<Tensor>> w_attn_norm, w_attn_qkv, b_attn_qkv, w_attn_out,
        w_ffn_norm, w_ffn_gate_up, w_ffn_down;
    for (size_t layer = 0; layer < meta->nlayer; layer++) {
        w_attn_norm.push_back(
            getAttnNorm(meta, weights, layer));
        w_attn_qkv.push_back(
            getAttnQKV(meta, weights, layer, idev, ndev));
        if (weights->attn_qkv_b != nullptr) {
            b_attn_qkv.push_back(
                getAttnQKVBias(meta, weights, layer, idev, ndev));
        }
        w_attn_out.push_back(
            getAttnO(meta, weights, layer, idev, ndev));
        w_ffn_norm.push_back(
            getFFNNorm(meta, weights, layer));
        w_ffn_gate_up.push_back(
            getFFNGateUp(meta, weights, layer, idev, ndev));
        w_ffn_down.push_back(
            getFFNDown(meta, weights, layer, idev, ndev));
    }

    auto memory_pool = std::make_shared<MemoryPool>(128 * 1024 * 1024);

    *rsrc = JiugeDeviceResource{
        device,
        dev_id,
        handle,
        getInEmbd(meta, weights),
        getOutNorm(meta, weights),
        getOutEmbd(meta, weights),
        getSinTable(meta),
        getCosTable(meta),
        w_attn_norm,
        w_attn_qkv,
        b_attn_qkv,
        w_attn_out,
        w_ffn_norm,
        w_ffn_gate_up,
        w_ffn_down,
        stream,
        comm,
        memory_pool,
    };
    RUN_INFINI(infinirtDeviceSynchronize());
}

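// Tear down a device's resources: synchronize the device first, drop every
// weight tensor, then destroy the operator handle, the stream, and the
// communicator.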
void releaseDeviceResource(JiugeDeviceResource &res) {
    infinirtDeviceSynchronize();
    // Release individual Tensors
    res.w_in_embd.reset();
    res.w_out_norm.reset();
    res.w_out_embd.reset();
    res.sin_table.reset();
    res.cos_table.reset();
    for (auto &t : res.w_attn_norm) {
        t.reset();
    }
    res.w_attn_norm.clear();
    for (auto &t : res.w_attn_qkv) {
        t.reset();
    }
    res.w_attn_qkv.clear();
    for (auto &t : res.b_attn_qkv) {
        t.reset();
    }
    res.b_attn_qkv.clear();
    for (auto &t : res.w_attn_out) {
        t.reset();
    }
    res.w_attn_out.clear();
    for (auto &t : res.w_ffn_norm) {
        t.reset();
    }
    res.w_ffn_norm.clear();
    for (auto &t : res.w_ffn_gate_up) {
        t.reset();
    }
    res.w_ffn_gate_up.clear();
    for (auto &t : res.w_ffn_down) {
        t.reset();
    }
    res.w_ffn_down.clear();
    infiniopDestroyHandle(res.handle);
    res.handle = nullptr;
    infinirtStreamDestroy(res.stream);
    res.stream = nullptr;
    infinicclCommDestroy(res.comm);
    res.comm = nullptr;
}

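// Run one batched forward pass on a single device (rank idev of ndev).
// `tokens` holds the concatenated new tokens of `nreq` requests; `req_lens`
// gives each request's token count and `req_pos` its current position in the
// KV cache. On rank 0, a next token is sampled per request into `output`
// (if non-null) and/or the logits of all `ntok` positions are copied to
// `last_logits` (if non-null).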
void inferDeviceBatch(const JiugeMeta &meta, JiugeDeviceResource &rsrc,
                      uint32_t idev, uint32_t ndev,
                      const uint32_t *tokens, uint32_t ntok,
                      const uint32_t *req_lens, uint32_t nreq, const uint32_t *req_pos,
                      struct KVCache **kv_caches,
                      const float *temperature, const uint32_t *topk, const float *topp,
                      uint32_t *output, void *last_logits) {
    auto nlayer = meta.nlayer;
    auto nkvh = meta.nkvh / ndev;
    auto nh = meta.nh / ndev;
    auto ngroup = nh / nkvh;
    // auto dctx = meta.dctx;
    auto dh = meta.dh;
    auto d = meta.d;
    auto dt_logits = meta.dt_logits;
    auto di = meta.di / ndev;
    auto dvoc = meta.dvoc;
    auto stream = rsrc.stream;
    bool has_qkv_bias = rsrc.b_attn_qkv.size() > 0;

    // Allocate buffers
    auto logits_in = Tensor::buffer(dt_logits, {ntok, d}, rsrc.memory_pool);
    auto logits_out = Tensor::buffer(dt_logits, {ntok, d}, rsrc.memory_pool);
    auto qkv_buf = Tensor::buffer(dt_logits, {ntok, (nh + nkvh * 2) * dh}, rsrc.memory_pool);
    auto gate_up_buf = Tensor::buffer(dt_logits, {ntok, 2 * di}, rsrc.memory_pool);
    auto o_buf = Tensor::buffer(dt_logits, {ntok, nh * dh}, rsrc.memory_pool);
    auto prob_buf = Tensor::buffer(dt_logits, {nreq, dvoc}, rsrc.memory_pool);
    auto result_buf = Tensor::buffer(INFINI_DTYPE_I64, {nreq}, rsrc.memory_pool);
    auto result_cpu = std::vector<int64_t>(nreq);

    auto qkv_rope = qkv_buf->view({ntok, nh + nkvh * 2, dh});

    // Prepare inputs
    auto batch_pos_ids = std::vector<uint32_t>(ntok);
    size_t req_start = 0;
    for (uint32_t req = 0; req < nreq; req++) {
        for (uint32_t i = 0; i < req_lens[req]; i++) {
            batch_pos_ids[req_start + i] = req_pos[req] + i;
        }
        req_start += req_lens[req];
    }

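    // Stage position ids on the device (a direct host view on CPU, an async
    // H2D copy otherwise), then gather each token's input-embedding row into
    // logits_in with device-to-device copies.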
    std::shared_ptr<Tensor> pos_ids_buf;
    if (rsrc.device == INFINI_DEVICE_CPU) {
        pos_ids_buf = Tensor::weight(batch_pos_ids.data(), INFINI_DTYPE_U32, {ntok});
    } else {
        pos_ids_buf = Tensor::buffer(INFINI_DTYPE_U32, {ntok}, rsrc.memory_pool);
        RUN_INFINI(infinirtMemcpyAsync(pos_ids_buf->data(), batch_pos_ids.data(), sizeof(uint32_t) * ntok,
                                       INFINIRT_MEMCPY_H2D, stream));
    }
    for (uint32_t i = 0; i < ntok; i++) {
        RUN_INFINI(infinirtMemcpyAsync(logits_in->data(i * d),
                                       rsrc.w_in_embd->data(tokens[i] * d),
                                       dsize(dt_logits) * d, INFINIRT_MEMCPY_D2D, stream));
    }

    // Attention
    // Size the shared attention work buffers for the largest request in the batch
    size_t max_qk_size = 0;
    size_t max_seq_len = 0;

    for (uint32_t req = 0; req < nreq; req++) {
        auto past_len = req_pos[req];
        auto seq_len = req_lens[req];
        auto total_len = past_len + seq_len;

        max_qk_size = std::max(max_qk_size, size_t(seq_len * total_len));
        max_seq_len = std::max(max_seq_len, size_t(seq_len));
    }

    auto qk_buf = Tensor::buffer(dt_logits, {nh * max_qk_size}, rsrc.memory_pool);
    auto rearrange_q_buf = Tensor::buffer(dt_logits, {nkvh, ngroup * max_seq_len, dh}, rsrc.memory_pool);
    auto q_rearrange = rearrange_q_buf->view({nkvh, ngroup, max_seq_len, dh});
    auto attn_val_buf = Tensor::buffer(dt_logits, {nkvh, ngroup * max_seq_len, dh}, rsrc.memory_pool);
    auto attn_val_gemm = attn_val_buf->view({nkvh, ngroup, max_seq_len, dh});

    // MLP buffers
    auto gate_buf = gate_up_buf->slice(1, 0, di);
    auto up_buf = gate_up_buf->slice(1, di, di);

    // Compute
    for (uint32_t layer = 0; layer < nlayer; layer++) {
        // 1. Attention
        // rms norm
        rmsnorm(logits_out, logits_in, rsrc.w_attn_norm[layer], meta.epsilon);
        // qkv_proj
        linear(qkv_buf, logits_out, rsrc.w_attn_qkv[layer], 1.0, 0.0, nullptr, has_qkv_bias ? rsrc.b_attn_qkv[layer] : nullptr);
        // rope
        rope(qkv_rope->slice(1, 0, nh), qkv_rope->slice(1, 0, nh), pos_ids_buf, rsrc.sin_table, rsrc.cos_table);
        rope(qkv_rope->slice(1, nh, nkvh), qkv_rope->slice(1, nh, nkvh), pos_ids_buf, rsrc.sin_table, rsrc.cos_table);

        size_t token_offset = 0;
        for (uint32_t req = 0; req < nreq; req++) {
            auto past_len = req_pos[req];
            auto seq_len = req_lens[req];
            auto total_len = past_len + seq_len;
            auto o = o_buf->slice({{0, token_offset, seq_len}})->view({seq_len, nkvh, ngroup, dh})->permute({1, 2, 0, 3});
            auto q = qkv_rope->slice({{0, token_offset, seq_len}, {1, 0, nh}})->view({seq_len, nkvh, ngroup, dh})->permute({1, 2, 0, 3});
            auto k = qkv_rope->slice({{0, token_offset, seq_len}, {1, nh, nkvh}});
            auto v = qkv_rope->slice({{0, token_offset, seq_len}, {1, nh + nkvh, nkvh}});

            // self attention
            // concat
            rearrange(kv_caches[req]->k[idev][layer]->slice(0, past_len, seq_len), k);
            rearrange(kv_caches[req]->v[idev][layer]->slice(0, past_len, seq_len), v);
            // qk
            rearrange(q_rearrange->slice(2, 0, seq_len), q);
            auto qk_gemm = qk_buf->slice(0, 0, nh * seq_len * total_len)->view({nkvh, ngroup * seq_len, total_len});
            auto k_gemm = kv_caches[req]->k[idev][layer]->slice(0, 0, total_len)->permute({1, 2, 0});
            linear(qk_gemm, rearrange_q_buf->slice(1, 0, ngroup * seq_len), k_gemm, 1.f / float(sqrt(dh)), 0.f, nullptr, nullptr);
            // softmax
            auto qk_softmax = qk_gemm->view({nh, seq_len, total_len});
            causalSoftmax(qk_softmax, qk_softmax);
            auto v_gemm = kv_caches[req]->v[idev][layer]->slice(0, 0, total_len)->permute({1, 0, 2});
            linear(attn_val_buf->slice(1, 0, ngroup * seq_len), qk_gemm, v_gemm, 1.f, 0.f, nullptr, nullptr);
            // rearrange attn val
            rearrange(o, attn_val_gemm->slice(2, 0, seq_len));

            token_offset += seq_len;
        }

        // o_proj
        linear(logits_in, o_buf, rsrc.w_attn_out[layer], 1.0, 0.0, idev == 0 ? logits_in : nullptr, nullptr); // only rank 0 adds residual

        // All_reduce if distributed
        if (rsrc.comm != nullptr) {
            RUN_INFINI(infinicclAllReduce(
                logits_in->data(), logits_in->data(), ntok * d, dt_logits,
                INFINICCL_SUM, rsrc.comm, stream));
            RUN_INFINI(infinirtStreamSynchronize(stream));
        }
        // 2. FFN
        rmsnorm(logits_out, logits_in, rsrc.w_ffn_norm[layer], meta.epsilon);
        linear(gate_up_buf, logits_out, rsrc.w_ffn_gate_up[layer], 1.0, 0.0, nullptr, nullptr);
        swiglu(gate_buf, up_buf, gate_buf);
        linear(logits_in, gate_buf, rsrc.w_ffn_down[layer], 1.0, 0.0, idev == 0 ? logits_in : nullptr, nullptr); // only rank 0 adds residual

        // All_reduce if distributed
        if (rsrc.comm != nullptr) {
            RUN_INFINI(infinicclAllReduce(
                logits_in->data(), logits_in->data(), ntok * d, dt_logits,
                INFINICCL_SUM, rsrc.comm, stream));
            RUN_INFINI(infinirtStreamSynchronize(stream));
        }
    }
    // Sample and Output
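    // Only rank 0 produces results: it applies the final RMS norm and the
    // output embedding, optionally copying the full per-token logits back to
    // the host and/or sampling one next token per request (taken from each
    // request's last position).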
    if (idev == 0) {
        if (last_logits != nullptr) {
            rmsnorm(logits_out, logits_in, rsrc.w_out_norm, meta.epsilon);
            auto last_logits_buf = Tensor::buffer(dt_logits, {ntok, dvoc}, rsrc.memory_pool);
            linear(last_logits_buf, logits_out, rsrc.w_out_embd, 1.0, 0.0, nullptr, nullptr);
            RUN_INFINI(infinirtStreamSynchronize(stream));
            RUN_INFINI(infinirtMemcpy(last_logits, last_logits_buf->data(), dsize(dt_logits) * ntok * dvoc, INFINIRT_MEMCPY_D2H));
        }
        if (output != nullptr) {
            size_t token_offset = 0;
            for (uint32_t req = 0; req < nreq; req++) {
                auto seq_len = req_lens[req];
                token_offset += seq_len;
                rmsnorm(logits_out->slice(0, req, 1),
                        logits_in->slice(0, token_offset - 1, 1),
                        rsrc.w_out_norm,
                        meta.epsilon);
            }
            linear(prob_buf, logits_out->slice(0, 0, nreq), rsrc.w_out_embd, 1.0, 0.0, nullptr, nullptr);
            std::random_device _rd;
            std::mt19937 gen(_rd());
            token_offset = 0;
            for (uint32_t req = 0; req < nreq; req++) {
                auto seq_len = req_lens[req];
                float random_val = std::uniform_real_distribution<float>(0, 1)(gen);
                randomSample(result_buf->slice(0, req, 1)->view_as({}, {}),
                             prob_buf->slice(0, req, 1)->view_as({dvoc}, {1}),
                             random_val, topp[req], topk[req], temperature[req]);
                token_offset += seq_len;
            }
            RUN_INFINI(infinirtStreamSynchronize(stream));
            RUN_INFINI(infinirtMemcpy(result_cpu.data(), result_buf->data(),
                                      sizeof(int64_t) * nreq, INFINIRT_MEMCPY_D2H));
            for (uint32_t req = 0; req < nreq; req++) {
                output[req] = uint32_t(result_cpu[req]);
            }
        }
    }
}

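// C API: run one decoding step for a batch of requests. Fills the shared
// request descriptor, wakes every device worker thread, and blocks until all
// of them report completion; sampled token ids are written to `output`.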
__C void
inferBatchJiuge(struct JiugeModel *model,
                const uint32_t *tokens, uint32_t ntok,
                const uint32_t *req_lens, uint32_t nreq, const uint32_t *req_pos,
                struct KVCache **kv_caches,
                const float *temperature, const uint32_t *topk, const float *topp,
                uint32_t *output) {
    model->req.tokens = tokens;
    model->req.ntok = ntok;
    model->req.req_lens = req_lens;
    model->req.nreq = nreq;
    model->req.req_pos = req_pos;
    model->req.kv_caches = kv_caches;
    model->req.output = output;
    model->req.logits = nullptr;
    model->req.temperature = temperature;
    model->req.topk = topk;
    model->req.topp = topp;

    for (size_t idev = 0; idev < model->dev_ids.size(); idev++) {
        std::unique_lock<std::mutex> lock(model->states[idev].mtx);
        model->states[idev].proceed = true;
        lock.unlock();
        model->states[idev].cv_start.notify_one();
    }
    for (size_t i = model->dev_ids.size(); i > 0; i--) {
        auto idev = i - 1;
        std::unique_lock<std::mutex> lock(model->states[idev].mtx);
        model->states[idev].cv_done.wait(lock, [&] { return !(model->states[idev].proceed); });
        lock.unlock();
    }
}

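// C API: same dispatch as inferBatchJiuge, but instead of sampling it returns
// the raw logits for every input token through `logits`; the sampling
// parameters are left unset.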
__C void
forwardBatchJiuge(struct JiugeModel *model,
                  const uint32_t *tokens, uint32_t ntok,
                  const uint32_t *req_lens, uint32_t nreq, const uint32_t *req_pos,
                  struct KVCache **kv_caches,
                  void *logits) {
    model->req.tokens = tokens;
    model->req.ntok = ntok;
    model->req.req_lens = req_lens;
    model->req.nreq = nreq;
    model->req.req_pos = req_pos;
    model->req.kv_caches = kv_caches;
    model->req.output = nullptr;
    model->req.logits = logits;
    model->req.temperature = nullptr;
    model->req.topk = nullptr;
    model->req.topp = nullptr;

    for (size_t idev = 0; idev < model->dev_ids.size(); idev++) {
        std::unique_lock<std::mutex> lock(model->states[idev].mtx);
        model->states[idev].proceed = true;
        lock.unlock();
        model->states[idev].cv_start.notify_one();
    }
    for (size_t i = model->dev_ids.size(); i > 0; i--) {
        auto idev = i - 1;
        std::unique_lock<std::mutex> lock(model->states[idev].mtx);
        model->states[idev].cv_done.wait(lock, [&] { return !(model->states[idev].proceed); });
        lock.unlock();
    }
}

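// Per-device worker thread body: create this device's resources and inference
// context, signal the constructor that loading finished, then loop on cv_start
// running inferDeviceBatch until exit_flag is set, and finally release the
// device resources.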
void launchDevice(const JiugeMeta &meta, const JiugeWeights *weights, JiugeDeviceResource *rsrc, InferState &state, InferRequest &req,
                  infiniDevice_t device, int idev, int ndev, int dev_id, infinicclComm_t comm) {
    // Create Device Resource
    createDeviceResource(rsrc, &meta, weights, device, idev, ndev, dev_id, comm);

    CacheManager cache_manager(100);
    InferenceContext ctx(rsrc->handle, rsrc->memory_pool, &cache_manager, rsrc->stream);

    // Set the inference context for this thread
    setInferenceContext(&ctx);

    {
        std::unique_lock<std::mutex> lock(state.mtx);
        state.loaded = true;
        lock.unlock();
        state.cv_load.notify_one();
    }

    // Infer Loop
    while (true) {
        std::unique_lock<std::mutex> lock(state.mtx);
        state.cv_start.wait(lock, [&] { return state.proceed || state.exit_flag; });
        // quit if exit_flag is set
        if (state.exit_flag) {
            break;
        }

        inferDeviceBatch(meta, *rsrc, idev, ndev, req.tokens, req.ntok,
                         req.req_lens, req.nreq, req.req_pos, req.kv_caches,
                         req.temperature, req.topk, req.topp, req.output, req.logits);

        state.proceed = false;
        lock.unlock();
        state.cv_done.notify_one();
    }

    // Clean-Up
    releaseDeviceResource(*rsrc);
    setInferenceContext(nullptr); // Clear the context when done
}

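// Spawn one worker thread per device (with a communicator set when more than
// one device is used) and wait until every worker has finished loading its
// weights before returning.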
JiugeModel::JiugeModel(const JiugeMeta *_meta, const JiugeWeights *weights, infiniDevice_t device_, std::vector<int> device_ids) : meta(*_meta) {
    int ndev = int(device_ids.size());
    device = device_;
    dev_ids = device_ids;
    dev_resources = std::vector<JiugeDeviceResource>(ndev);
    states = std::vector<InferState>(ndev);
    threads.resize(ndev);
    RUN_INFINI(infinirtInit());
    auto comms = std::vector<infinicclComm_t>(ndev, nullptr);
    if (ndev > 1) {
        RUN_INFINI(infinicclCommInitAll(device, comms.data(), ndev, dev_ids.data()));
    }

    for (int i = 0; i < ndev; i++) {
        threads[i] = std::thread(launchDevice, std::cref(meta), weights, &dev_resources[i], std::ref(states[i]), std::ref(req), device, i, ndev, dev_ids[i], comms[i]);
    }
    for (int i = 0; i < ndev; i++) {
        std::unique_lock<std::mutex> lock(states[i].mtx);
        states[i].cv_load.wait(lock, [&] { return states[i].loaded; });
        lock.unlock();
    }
}

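// C API: construct a JiugeModel distributed across ndev devices.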
__C struct JiugeModel *
createJiugeModel(const JiugeMeta *meta,
                 const JiugeWeights *weights,
                 infiniDevice_t device,
                 int ndev,
                 const int *dev_ids) {
    std::vector<int> device_ids(ndev);
    std::copy(dev_ids, dev_ids + ndev, device_ids.begin());
    JiugeModel *model = new JiugeModel(meta, weights, device, device_ids);
    return model;
}

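// C API: ask every worker thread to exit, join them (each worker releases its
// own device resources on the way out), then free the model object.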
__C void destroyJiugeModel(struct JiugeModel *model) {
    auto ndev = model->dev_resources.size();

    for (size_t idev = 0; idev < ndev; idev++) {
        std::unique_lock<std::mutex> lock(model->states[idev].mtx);
        model->states[idev].exit_flag = true;
        lock.unlock();
        model->states[idev].cv_start.notify_one();
    }

    for (size_t idev = 0; idev < ndev; idev++) {
        model->threads[idev].join();
    }

    delete model;
}