#pragma once

#include "llama_config.hpp"
#include "llama_decoder_layer.hpp"

#include "../../cache/kv_cache.hpp"
#include "../../engine/distributed/distributed.hpp"

#include "infinicore/nn/embedding.hpp"
#include "infinicore/nn/module.hpp"
#include "infinicore/nn/rmsnorm.hpp"
#include "infinicore/nn/rope.hpp"
#include "infinicore/tensor.hpp"

#include <memory>
#include <vector>

namespace infinilm::models::llama {

/**
 * @brief Main Llama model architecture (without language modeling head)
 *
 * This is the core transformer model consisting of:
 * - Token embeddings (embed_tokens)
 * - Multiple decoder layers (layers)
 * - Final layer normalization (norm)
 * - Rotary Position Embeddings (rotary_emb)
 *
 * This matches the structure of HuggingFace's LlamaModel.
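 *
 * A minimal usage sketch (illustrative only; how the config, device, and
 * input tensors are obtained depends on the surrounding engine code):
 * @code
 * LlamaModel model(config, device);  // dtype defaults to F32
 * // input_ids: [batch, seq_len]; position_ids: [batch, seq_len] or [seq_len]
 * infinicore::Tensor hidden = model.forward(input_ids, position_ids);
 * // hidden: [batch, seq_len, hidden_size]
 * @endcode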
 */
class LlamaModel : public infinicore::nn::Module {
public:
    /**
     * @brief Construct LlamaModel module
     *
     * @param config Model configuration
     * @param device Device to create tensors on
     * @param dtype Optional data type for model parameters (defaults to F32)
     * @param rank_info Distributed rank information for multi-device execution
     *                  (defaults to a single-rank RankInfo)
     */
    LlamaModel(const LlamaConfig &config,
               const infinicore::Device &device,
               infinicore::DataType dtype = infinicore::DataType::F32,
               engine::distributed::RankInfo rank_info = engine::distributed::RankInfo());

    /**
     * @brief Forward pass: process input through the model
     *
     * @param input_ids Token IDs tensor of shape [batch, seq_len]
     * @param position_ids Position IDs tensor of shape [batch, seq_len] or [seq_len]
     * @param kv_cache Optional model-level KV cache for incremental decoding
     * @return Output tensor of shape [batch, seq_len, hidden_size]
     */
    infinicore::Tensor forward(const infinicore::Tensor &input_ids,
                               const infinicore::Tensor &position_ids,
                               void *kv_cache = nullptr) const;
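
    // Incremental decoding sketch (illustrative): prefill once with the full
    // prompt, then decode token by token; attention state is carried by the
    // internal cache (or by an external cache passed via the opaque pointer).
    //
    //   auto hidden = model.forward(prompt_ids, prompt_positions); // prefill
    //   auto next   = model.forward(next_id, next_position);       // one decode step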

    // Module information
    const LlamaConfig &config() const { return config_; }
    size_t num_layers() const { return config_.num_hidden_layers; }

    /**
     * @brief Reset the internal cache to a specific position
     *
     * Call this when starting a new generation sequence so that cached state
     * does not persist between unrelated prompts.
     * @param pos Position to reset to (defaults to 0)
     */
    void reset_cache(size_t pos = 0) const;
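
    // Typical use (illustrative): reset between independent prompts so no
    // attention state leaks from the previous sequence.
    //
    //   model.reset_cache();  // back to position 0
    //   auto hidden = model.forward(new_ids, new_positions);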

protected:
    // Token embeddings
    INFINICORE_NN_MODULE(infinicore::nn::Embedding, embed_tokens);

    // Decoder layers
    INFINICORE_NN_MODULE_VEC(LlamaDecoderLayer, layers);

    // Final normalization
    INFINICORE_NN_MODULE(infinicore::nn::RMSNorm, norm);

    // Rotary Position Embeddings (shared across all layers)
    INFINICORE_NN_MODULE(infinicore::nn::RoPE, rotary_emb);

private:
    LlamaConfig config_;
    // Persistent cache for when no external cache is provided
    // Mutable because it's not part of the model's learned parameters,
    // but needs to persist across forward calls for incremental decoding
    mutable std::unique_ptr<infinilm::cache::DynamicCache> cache_;
};

} // namespace infinilm::models::llama