#pragma once

#include "../../cache/kv_cache.hpp"
#include "../../engine/distributed/distributed.hpp"

#include "llama_config.hpp"
#include "llama_decoder_layer.hpp"

#include "infinicore/nn/embedding.hpp"
#include "infinicore/nn/module.hpp"
#include "infinicore/nn/rmsnorm.hpp"
#include "infinicore/nn/rope.hpp"
#include "infinicore/tensor.hpp"

#include <memory>
#include <vector>

namespace infinilm::models::llama {

/**
 * @brief Main Llama model architecture (without language modeling head)
 *
 * This is the core transformer model consisting of:
 * - Token embeddings (embed_tokens)
 * - Multiple decoder layers (layers)
 * - Final layer normalization (norm)
 * - Rotary Position Embeddings (rotary_emb)
 *
 * This matches the structure of HuggingFace's LlamaModel.
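 *
 * A minimal usage sketch (illustrative only): it assumes a populated
 * LlamaConfig `cfg`, a valid `device`, and input tensors already created on
 * that device; tensor construction itself is outside the scope of this header.
 * @code
 * LlamaModel model(cfg, device);
 * // input_ids:       [batch, seq_len]
 * // position_ids:    [batch, seq_len] or [seq_len]
 * // cache_positions: [n_req]
 * infinicore::Tensor hidden = model.forward(input_ids, position_ids, cache_positions);
 * // hidden has shape [batch, seq_len, hidden_size]; the language modeling
 * // head is applied outside this module.
 * @endcode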
 */
class LlamaModel : public infinicore::nn::Module {
public:
    /**
     * @brief Construct LlamaModel module
     *
     * @param config Model configuration
     * @param device Device to create tensors on
     * @param rank_info Distributed rank information (defaults to a default-constructed RankInfo)
     */
    LlamaModel(const LlamaConfig &config,
               const infinicore::Device &device,
               engine::distributed::RankInfo rank_info = engine::distributed::RankInfo());

    /**
     * @brief Forward pass: process input through the model
     *
     * @param input_ids Token IDs tensor of shape [batch, seq_len]
     * @param position_ids Position IDs tensor of shape [batch, seq_len] or [seq_len]
     * @param cache_positions Cache positions tensor of shape [n_req]
     * @return Output tensor of shape [batch, seq_len, hidden_size]
     */
    infinicore::Tensor forward(const infinicore::Tensor &input_ids,
                               const infinicore::Tensor &position_ids,
                               const infinicore::Tensor &cache_positions) const;

    /**
     * @brief Reset the KV cache according to the given cache configuration
     */
    void reset_cache(const cache::CacheConfig *cache_config);

    // Module information
    const LlamaConfig &config() const { return config_; }
    size_t num_layers() const { return config_.num_hidden_layers; }

protected:
    // Token embeddings
    INFINICORE_NN_MODULE(infinicore::nn::Embedding, embed_tokens);

    // Decoder layers
    INFINICORE_NN_MODULE_VEC(LlamaDecoderLayer, layers);

    // Final normalization
    INFINICORE_NN_MODULE(infinicore::nn::RMSNorm, norm);

    // Rotary Position Embeddings (shared across all layers)
    INFINICORE_NN_MODULE(infinicore::nn::RoPE, rotary_emb);

    // Distributed rank information
    engine::distributed::RankInfo rank_info_;

    // Key-value cache (see reset_cache)
    std::shared_ptr<cache::Cache> kv_cache_;

private:
    LlamaConfig config_;
};

} // namespace infinilm::models::llama