/*
 * Copyright (c) OpenMMLab. All rights reserved.
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.  All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Modified from
// https://github.com/NVIDIA/FasterTransformer/blob/main/src/turbomind/models/multi_gpu_gpt/ParallelGptContextDecoder.h

#pragma once

#include "src/turbomind/layers/BaseLayer.h"
#include "src/turbomind/models/llama/LlamaContextAttentionLayer.h"
#include "src/turbomind/models/llama/LlamaDecoderLayerWeight.h"
#include "src/turbomind/models/llama/LlamaFfnLayer.h"
#include "src/turbomind/models/llama/llama_params.h"
#include "src/turbomind/utils/Tensor.h"
#include "src/turbomind/utils/allocator.h"
#include "src/turbomind/utils/cublasMMWrapper.h"
#include "src/turbomind/utils/custom_ar_comm.h"
#include "src/turbomind/utils/nccl_utils.h"

namespace turbomind {
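
// LlamaContextDecoder runs the context (prefill) phase of inference: for each
// transformer layer it applies context attention followed by a SiLU-gated FFN
// over the prompt tokens of a batch, writing the resulting keys/values to the
// KV cache.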

template<typename T>
class LlamaContextDecoder: public BaseLayer {
protected:
    void allocateBuffer() override;
    void allocateBuffer(size_t batch_size, size_t num_token, size_t max_q_len, size_t max_kv_len);
    void freeBuffer() override;

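    // Builds the attention and FFN sub-layers; called from the constructor.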
    void initialize(const LlamaAttentionParams& attn_params,
                    size_t                      kv_head_num,
                    bool                        use_fmha,
                    int                         cache_block_seq_len,
                    int                         quant_policy);

    size_t head_num_;        // number of attention (query) heads
    size_t size_per_head_;   // dimension of each attention head
    size_t inter_size_;      // intermediate size of the feed-forward network
    size_t num_layer_;       // number of transformer layers
    size_t hidden_units_;    // hidden dimension of the model
    float  rmsnorm_eps_;     // epsilon added inside RMSNorm for numerical stability

    NcclParam tensor_para_;

    T*   attention_mask_{};  // attention mask built from the per-sequence lengths
    int* padding_offset_{};  // per-token offsets used to strip padding from the batch
    int* cu_seqlens_{};      // cumulative (prefix-sum) sequence lengths, "cu" for cumulative

    size_t* h_pinned_token_num_ptr_{};  // pinned host buffer for the token count used when building padding offsets

    LlamaContextAttentionLayer<T>* context_attention_layer_{};  // attention over the prompt (context) tokens
    LlamaFfnLayer<T>*              silu_ffn_layer_{};           // SiLU-gated feed-forward network

    const DataType data_type_;

    struct Session {
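        // Transient state for one forward() pass: batch geometry, per-sequence
        // lengths, and the layer weights currently in use.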
        size_t batch_size;
        size_t token_num;         // total number of input tokens in the batch
        size_t max_query_len;     // longest input (query) sequence in the batch
        size_t max_key_len;       // longest key sequence (history + input) in the batch
        int*   input_length{};    // per-sequence input length
        int*   context_length{};  // per-sequence total context length (history + input)

        const std::vector<LlamaDecoderLayerWeight<T>*>* weights;
    };

    // Runs the attention sub-layer of layer `layer` in place on `attn_io`
    // (the hidden states of the prompt tokens), using the lengths in `sess`.
    void forwardSelfAttn(const Session&                                 sess,
                         T*                                             attn_io,
                         std::unordered_map<std::string, Tensor>*       output_tensors,
                         const std::unordered_map<std::string, Tensor>* input_tensors,
                         int                                            layer,
                         bool                                           is_final);

public:
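    // The decoder does not own the layer weights; they are supplied per call to
    // forward(). `use_fmha` toggles the fused multi-head attention kernel and
    // `quant_policy` selects the KV-cache quantization mode.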
    LlamaContextDecoder(size_t                      head_num,
                        size_t                      kv_head_num,
                        size_t                      size_per_head,
                        size_t                      inter_size,
                        size_t                      num_layer,
                        const LlamaAttentionParams& attn_params,
                        float                       rmsnorm_eps,
                        NcclParam                   tensor_para,
                        cudaStream_t                stream,
                        cublasMMWrapper*            cublas_wrapper,
                        IAllocator*                 allocator,
                        bool                        is_free_buffer_after_forward,
                        bool                        use_fmha,
                        int                         cache_block_seq_len,
                        int                         quant_policy);

    ~LlamaContextDecoder() override;
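
    // Minimal call sketch (the tensor keys and shapes are assumptions here;
    // they are defined by the implementation and its caller):
    //
    //   std::unordered_map<std::string, Tensor> inputs;   // prompt embeddings, lengths, ...
    //   std::unordered_map<std::string, Tensor> outputs;  // decoder output, KV cache refs, ...
    //   decoder.forward(&outputs, &inputs, &decoder_layer_weights);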

    virtual void forward(std::unordered_map<std::string, Tensor>*        output_tensors,
                         const std::unordered_map<std::string, Tensor>*  input_tensors,
                         const std::vector<LlamaDecoderLayerWeight<T>*>* decoder_layer_weights);

    virtual void forward(std::vector<Tensor>*                            output_tensors,
                         const std::vector<Tensor>*                      input_tensors,
                         const std::vector<LlamaDecoderLayerWeight<T>*>* decoder_layer_weights);
};

}  // namespace turbomind