"...mmclassification-0.24.1/.pre-commit-config.yaml" did not exist on "b21b0c01b6991c6437a8f110a4db902cc2ea0325"
0004-solar-pro.patch 18.8 KB
Newer Older
1
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Sun, 20 Apr 2025 16:11:09 -0700
Subject: [PATCH] solar-pro

adds support for the Solar Pro architecture
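
The core mechanism this patch wires up is Solar Pro's learned block skip
connection ("bskcn"): certain layers save their input hidden state, and
designated later layers blend that saved state back into their current input
using the two learned scalars stored in each layer's bskcn_tv tensor (see the
graph code in src/models/solar.cpp below). A rough, self-contained C++ sketch
of that blend (illustrative only; these names do not appear in the patch):

    // What the ggml_mul/ggml_add calls over 1-element views of bskcn_tv compute,
    // element by element: h_mixed = tv[0] * h_saved + tv[1] * h_current.
    #include <cstddef>
    #include <vector>

    std::vector<float> bskcn_mix(const std::vector<float> & h_saved,
                                 const std::vector<float> & h_current,
                                 float tv0, float tv1) {
        std::vector<float> h_mixed(h_current.size());
        for (size_t i = 0; i < h_current.size(); ++i) {
            h_mixed[i] = tv0 * h_saved[i] + tv1 * h_current[i];
        }
        return h_mixed;
    }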
---
 src/CMakeLists.txt         |   1 +
 src/llama-arch.cpp         |  21 +++++
 src/llama-arch.h           |   3 +
 src/llama-hparams.cpp      |   8 ++
 src/llama-hparams.h        |   5 ++
 src/llama-model-loader.cpp |   2 +-
 src/llama-model.cpp        |  48 +++++++++++
 src/llama-model.h          |   3 +
 src/models/models.h        |   5 ++
 src/models/solar.cpp       | 158 +++++++++++++++++++++++++++++++++++++
 10 files changed, 253 insertions(+), 1 deletion(-)
 create mode 100644 src/models/solar.cpp

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 67c7807e0..fda881640 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -125,6 +125,7 @@ add_library(llama
             models/seed-oss.cpp
             models/smallthinker.cpp
             models/smollm3.cpp
+            models/solar.cpp
             models/stablelm.cpp
             models/starcoder.cpp
             models/starcoder2.cpp
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 8571a2e02..b6bde25d5 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -85,6 +85,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
     { LLM_ARCH_GRANITE_HYBRID,   "granitehybrid"    },
     { LLM_ARCH_CHAMELEON,        "chameleon"        },
+    { LLM_ARCH_SOLAR,            "solar"            },
     { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
     { LLM_ARCH_PLM,              "plm"              },
     { LLM_ARCH_BAILINGMOE,       "bailingmoe"       },
@@ -204,6 +205,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_ATTENTION_SCALE,                        "%s.attention.scale"                        },
     { LLM_KV_ATTENTION_OUTPUT_SCALE,                 "%s.attention.output_scale"                 },
     { LLM_KV_ATTENTION_TEMPERATURE_LENGTH,           "%s.attention.temperature_length"           },
+    { LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,        "%s.attention.block_skip_connection"        },
     { LLM_KV_ATTENTION_KEY_LENGTH_MLA,               "%s.attention.key_length_mla"               },
     { LLM_KV_ATTENTION_VALUE_LENGTH_MLA,             "%s.attention.value_length_mla"             },
 
@@ -2023,6 +2025,24 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
         },
     },
+    {
+        LLM_ARCH_SOLAR,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_OUTPUT,          "output" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+            { LLM_TENSOR_BSKCN_TV,        "bskcn_tv" },
+        },
+    },
     {
         LLM_ARCH_WAVTOKENIZER_DEC,
         {
@@ -2681,6 +2701,7 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_LAUREL_POST_NORM,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     // this tensor is loaded for T5, but never used
     {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
+    {LLM_TENSOR_BSKCN_TV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
     {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
diff --git a/src/llama-arch.h b/src/llama-arch.h
index 150646478..3936a4687 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -89,6 +89,7 @@ enum llm_arch {
     LLM_ARCH_GRANITE_MOE,
     LLM_ARCH_GRANITE_HYBRID,
     LLM_ARCH_CHAMELEON,
+    LLM_ARCH_SOLAR,
     LLM_ARCH_WAVTOKENIZER_DEC,
     LLM_ARCH_PLM,
     LLM_ARCH_BAILINGMOE,
@@ -208,6 +209,7 @@ enum llm_kv {
     LLM_KV_ATTENTION_SCALE,
     LLM_KV_ATTENTION_OUTPUT_SCALE,
     LLM_KV_ATTENTION_TEMPERATURE_LENGTH,
+    LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
     LLM_KV_ATTENTION_KEY_LENGTH_MLA,
     LLM_KV_ATTENTION_VALUE_LENGTH_MLA,
 
@@ -459,6 +461,7 @@ enum llm_tensor {
     LLM_TENSOR_ENC_OUTPUT_NORM,
     LLM_TENSOR_CLS,
     LLM_TENSOR_CLS_OUT,
+    LLM_TENSOR_BSKCN_TV,
     LLM_TENSOR_CONV1D,
     LLM_TENSOR_CONVNEXT_DW,
     LLM_TENSOR_CONVNEXT_NORM,
diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp
index 8cdbaf69f..41127bf91 100644
--- a/src/llama-hparams.cpp
+++ b/src/llama-hparams.cpp
@@ -161,6 +161,14 @@ uint32_t llama_hparams::n_pos_per_embd() const {
     return rope_type == LLAMA_ROPE_TYPE_MROPE || rope_type == LLAMA_ROPE_TYPE_IMROPE ? 4 : 1;
 }
 
+bool llama_hparams::n_bskcn(uint32_t n, uint32_t il) const {
+    if (il < n_layer) {
+        return n_bskcn_arr[n][il] > 0;
+    }
+
+    GGML_ABORT("fatal error");
+}
+
 bool llama_hparams::is_swa(uint32_t il) const {
     if (il < n_layer) {
         return swa_layers[il];
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index c3a53be79..2ffe7dd30 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -64,6 +64,8 @@ struct llama_hparams {
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
 
+    std::array<std::array<uint32_t, LLAMA_MAX_LAYERS>, 4> n_bskcn_arr = {};
+
     uint32_t n_layer_dense_lead = 0;
     uint32_t n_lora_q           = 0;
     uint32_t n_lora_kv          = 0;
@@ -256,6 +258,9 @@ struct llama_hparams {
 
     uint32_t n_pos_per_embd() const;
 
+    // Block skip connection
+    bool n_bskcn(uint32_t n, uint32_t il) const;
+
     bool is_swa(uint32_t il) const;
 
     bool has_kv(uint32_t il) const;
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index aa3a65f87..ee303bd58 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -466,7 +466,7 @@ namespace GGUFMeta {
     template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
     template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);
     template bool llama_model_loader::get_key_or_arr<std::array<float, 512>>(enum llm_kv kid, std::array<float, 512> & result, uint32_t n, bool required);
-
+    template bool llama_model_loader::get_key_or_arr<uint32_t>(const std::string & key, std::array<uint32_t, 512> & result, uint32_t n, bool required);
 
 llama_model_loader::llama_model_loader(
         const std::string & fname,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index c2a545531..4468de2f9 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1961,6 +1961,21 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     default: type = LLM_TYPE_UNKNOWN;
                }
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                for (size_t i = 0; i < hparams.n_bskcn_arr.max_size(); ++i) {
+                    auto & bskcn = hparams.n_bskcn_arr[i];
+                    bskcn.fill(0);
+                    auto kv = LLM_KV(arch);
+                    ml.get_key_or_arr(format((kv(LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION) + ".%d").c_str(), i), bskcn, hparams.n_layer, false);
+                }
+
+                switch (hparams.n_layer) {
+                    case 64: type = LLM_TYPE_22B; break;
+                    default: type = LLM_TYPE_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_WAVTOKENIZER_DEC:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
@@ -5350,6 +5365,34 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                         layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
 
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_SOLAR:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    {
+                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                        output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.bskcn_tv = create_tensor(tn(LLM_TENSOR_BSKCN_TV, "weight", i), {2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
                         layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
                         layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
                         layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
@@ -7425,6 +7468,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
             {
                 llm = std::make_unique<llm_build_chameleon>(*this, params);
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                llm = std::make_unique<llm_build_solar>(*this, params);
+            } break;
         case LLM_ARCH_WAVTOKENIZER_DEC:
             {
                 llm = std::make_unique<llm_build_wavtokenizer_dec>(*this, params);
@@ -7684,6 +7731,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
         case LLM_ARCH_GRANITE_MOE:
         case LLM_ARCH_GRANITE_HYBRID:
         case LLM_ARCH_CHAMELEON:
+        case LLM_ARCH_SOLAR:
         case LLM_ARCH_BAILINGMOE:
         case LLM_ARCH_NEO_BERT:
         case LLM_ARCH_SMOLLM3:
diff --git a/src/llama-model.h b/src/llama-model.h
index f8342cf2c..cbf4e1bfa 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -76,6 +76,7 @@ enum llm_type {
     LLM_TYPE_15B,
     LLM_TYPE_16B,
     LLM_TYPE_20B,
+    LLM_TYPE_22B,
     LLM_TYPE_26B,
     LLM_TYPE_27B,
     LLM_TYPE_30B,
@@ -404,6 +405,8 @@ struct llama_layer {
     struct ggml_tensor * ffn_act_beta    = nullptr;
     struct ggml_tensor * ffn_act_eps     = nullptr;
 
+    struct ggml_tensor * bskcn_tv = nullptr;
+
     struct llama_layer_posnet posnet;
 
     struct llama_layer_convnext convnext;
diff --git a/src/models/models.h b/src/models/models.h
index 7ba225b47..71fea796d 100644
--- a/src/models/models.h
+++ b/src/models/models.h
@@ -510,6 +510,11 @@ struct llm_build_smollm3 : public llm_graph_context {
     llm_build_smollm3(const llama_model & model, const llm_graph_params & params);
 };
 
+struct llm_build_solar : public llm_graph_context {
+    llm_build_solar(const llama_model & model, const llm_graph_params & params);
+};
+
+
 struct llm_build_stablelm : public llm_graph_context {
     llm_build_stablelm(const llama_model & model, const llm_graph_params & params);
 };
diff --git a/src/models/solar.cpp b/src/models/solar.cpp
new file mode 100644
index 000000000..97383928c
--- /dev/null
+++ b/src/models/solar.cpp
@@ -0,0 +1,158 @@
+#include "models.h"
+
+llm_build_solar::llm_build_solar(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = build_inp_embd(model.tok_embd);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        auto * inp_attn = build_attn_inp_kv();
+
+        const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+        struct ggml_tensor * bskcn_1;
+        struct ggml_tensor * bskcn_2;
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            if (hparams.n_bskcn(0, il)) {
+                bskcn_1 = inpSA;
+            }
+
+            if (hparams.n_bskcn(1, il)) {
+                bskcn_2 = inpSA;
+            }
+
+            if (hparams.n_bskcn(2, il)) {
+                inpSA = ggml_add(
+                   ctx0,
+                   ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                   ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            if (hparams.n_bskcn(3, il)) {
+                inpSA = ggml_add(
+                   ctx0,
+                   ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                   ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            // norm
+            cur = build_norm(inpL,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // rope freq factors for llama3; may return nullptr for llama2 and other models
+                ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
+
+                // compute Q and K and RoPE them
+                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                   cb(Kcur, "Kcur", il);
+                }
+
+                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+                Qcur = ggml_rope_ext(
+                        ctx0, Qcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                Kcur = ggml_rope_ext(
+                        ctx0, Kcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                cur = build_attn(inp_attn,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
+                cb(cur, "attn_out", il);
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            }
+
+            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+            cur = build_norm(ffn_inp,
+                    model.layers[il].ffn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "ffn_norm", il);
+
+            cur = build_ffn(cur,
+                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
+                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                    NULL,
+                    LLM_FFN_SILU, LLM_FFN_PAR, il);
+            cb(cur, "ffn_out", il);
+
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);
+
+            cur = build_cvec(cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = build_norm(cur,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, -1);
+
+        cb(cur, "result_norm", -1);
+        res->t_embd = cur;
+
+        // lm_head
+        cur = build_lora_mm(model.output, cur);
+
+        cb(cur, "result_output", -1);
+        res->t_logits = cur;
+
+        ggml_build_forward_expand(gf, cur);
+}