From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Sun, 20 Apr 2025 16:11:09 -0700
Subject: [PATCH] solar-pro

adds support for the Solar Pro architecture
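
The distinguishing piece of the architecture is the block skip connection
(bskcn): the hidden state entering selected layers is saved and later
blended back into a downstream layer's input, weighted by a learned
two-element tensor bskcn_tv. The per-layer source/target masks are read
from the %s.attention.block_skip_connection.%d GGUF arrays into
hparams.n_bskcn_arr, and the blend itself is built in src/models/solar.cpp
as a ggml_add of two ggml_mul calls against one-element views of bskcn_tv
(element 0 scales the saved input, element 1 the current input).

A minimal standalone sketch of that blend, using plain C++ vectors instead
of ggml tensors (bskcn_blend, tv0 and tv1 are illustrative names only, not
part of the patch):

    #include <cstddef>
    #include <vector>

    // Blend a saved skip-connection input with the current layer input
    // using the two scales stored in bskcn_tv = {tv0, tv1}.
    static std::vector<float> bskcn_blend(const std::vector<float> & skip,
                                          const std::vector<float> & cur,
                                          float tv0, float tv1) {
        std::vector<float> out(cur.size());
        for (size_t i = 0; i < cur.size(); ++i) {
            out[i] = tv0 * skip[i] + tv1 * cur[i];
        }
        return out;
    }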
---
 src/CMakeLists.txt         |   1 +
 src/llama-arch.cpp         |  20 +++++
 src/llama-arch.h           |   3 +
 src/llama-hparams.cpp      |   8 ++
 src/llama-hparams.h        |   5 ++
 src/llama-model-loader.cpp |   2 +-
 src/llama-model.cpp        |  48 +++++++++++
 src/llama-model.h          |   3 +
 src/models/models.h        |   5 ++
 src/models/solar.cpp       | 158 +++++++++++++++++++++++++++++++++++++
 10 files changed, 252 insertions(+), 1 deletion(-)
 create mode 100644 src/models/solar.cpp

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 4192af7c0..bd44d73e7 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -125,6 +125,7 @@ add_library(llama
             models/seed-oss.cpp
             models/smallthinker.cpp
             models/smollm3.cpp
+            models/solar.cpp
             models/stablelm.cpp
             models/starcoder.cpp
             models/starcoder2.cpp
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 8caf80afc..2ce8ffec0 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -87,6 +87,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
     { LLM_ARCH_GRANITE_HYBRID,   "granitehybrid"    },
     { LLM_ARCH_CHAMELEON,        "chameleon"        },
+    { LLM_ARCH_SOLAR,            "solar"            },
     { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
     { LLM_ARCH_PLM,              "plm"              },
     { LLM_ARCH_BAILINGMOE,       "bailingmoe"       },
@@ -208,6 +209,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_ATTENTION_OUTPUT_SCALE,                 "%s.attention.output_scale"                 },
     { LLM_KV_ATTENTION_TEMPERATURE_LENGTH,           "%s.attention.temperature_length"           },
     { LLM_KV_ATTENTION_TEMPERATURE_SCALE,            "%s.attention.temperature_scale"            },
+    { LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,        "%s.attention.block_skip_connection"        },
     { LLM_KV_ATTENTION_KEY_LENGTH_MLA,               "%s.attention.key_length_mla"               },
     { LLM_KV_ATTENTION_VALUE_LENGTH_MLA,             "%s.attention.value_length_mla"             },
 
@@ -339,6 +341,7 @@ static const std::map<llm_tensor, const char *> LLM_TENSOR_NAMES = {
     { LLM_TENSOR_ATTN_QKV,                               "blk.%d.attn_qkv" },
     { LLM_TENSOR_LAYER_OUT_NORM,                         "blk.%d.layer_output_norm" },
     { LLM_TENSOR_ATTN_OUT_NORM,                          "blk.%d.attn_output_norm" },
+    { LLM_TENSOR_BSKCN_TV,                               "bskcn_tv" },
     { LLM_TENSOR_POS_EMBD,                               "position_embd" },
     { LLM_TENSOR_FFN_ACT,                                "blk.%d.ffn.act" },
     { LLM_TENSOR_TOKEN_EMBD_NORM,                        "token_embd_norm" },
@@ -2176,6 +2179,22 @@ static std::set<llm_tensor> llm_get_tensor_names(llm_arch arch) {
             return {
                 LLM_TENSOR_TOKEN_EMBD,
             };
+        case LLM_ARCH_SOLAR:
+            return {
+                LLM_TENSOR_TOKEN_EMBD,
+                LLM_TENSOR_OUTPUT_NORM,
+                LLM_TENSOR_OUTPUT,
+                LLM_TENSOR_ATTN_NORM,
+                LLM_TENSOR_ATTN_Q,
+                LLM_TENSOR_ATTN_K,
+                LLM_TENSOR_ATTN_V,
+                LLM_TENSOR_ATTN_OUT,
+                LLM_TENSOR_FFN_NORM,
+                LLM_TENSOR_FFN_GATE,
+                LLM_TENSOR_FFN_DOWN,
+                LLM_TENSOR_FFN_UP,
+                LLM_TENSOR_BSKCN_TV,
+            };
         default:
             GGML_ABORT("unknown architecture for tensor mapping");
     }
@@ -2344,6 +2363,7 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_LAUREL_POST_NORM,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     // this tensor is loaded for T5, but never used
     {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
+    {LLM_TENSOR_BSKCN_TV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
     {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
diff --git a/src/llama-arch.h b/src/llama-arch.h
index 6cbf9b1f8..14d461c76 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -91,6 +91,7 @@ enum llm_arch {
     LLM_ARCH_GRANITE_MOE,
     LLM_ARCH_GRANITE_HYBRID,
     LLM_ARCH_CHAMELEON,
+    LLM_ARCH_SOLAR,
     LLM_ARCH_WAVTOKENIZER_DEC,
     LLM_ARCH_PLM,
     LLM_ARCH_BAILINGMOE,
@@ -212,6 +213,7 @@ enum llm_kv {
     LLM_KV_ATTENTION_OUTPUT_SCALE,
     LLM_KV_ATTENTION_TEMPERATURE_LENGTH,
     LLM_KV_ATTENTION_TEMPERATURE_SCALE,
+    LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
     LLM_KV_ATTENTION_KEY_LENGTH_MLA,
     LLM_KV_ATTENTION_VALUE_LENGTH_MLA,
 
@@ -465,6 +467,7 @@ enum llm_tensor {
     LLM_TENSOR_ENC_OUTPUT_NORM,
     LLM_TENSOR_CLS,
     LLM_TENSOR_CLS_OUT,
+    LLM_TENSOR_BSKCN_TV,
     LLM_TENSOR_CONV1D,
     LLM_TENSOR_CONVNEXT_DW,
     LLM_TENSOR_CONVNEXT_NORM,
diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp
index fe1fa4341..aabff2f06 100644
--- a/src/llama-hparams.cpp
+++ b/src/llama-hparams.cpp
@@ -163,6 +163,14 @@ uint32_t llama_hparams::n_pos_per_embd() const {
     return rope_type == LLAMA_ROPE_TYPE_MROPE || rope_type == LLAMA_ROPE_TYPE_IMROPE ? 4 : 1;
 }
 
+bool llama_hparams::n_bskcn(uint32_t n, uint32_t il) const {
+    if (il < n_layer) {
+        return n_bskcn_arr[n][il] > 0;
+    }
+
+    GGML_ABORT("fatal error");
+}
+
 bool llama_hparams::is_swa(uint32_t il) const {
     if (il < n_layer) {
         return swa_layers[il];
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index f6e95b5d2..c6e673276 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -65,6 +65,8 @@ struct llama_hparams {
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
 
+    std::array<std::array<uint32_t, LLAMA_MAX_LAYERS>, 4> n_bskcn_arr = {};
+
     uint32_t n_layer_dense_lead = 0;
     uint32_t n_lora_q           = 0;
     uint32_t n_lora_kv          = 0;
@@ -259,6 +261,9 @@ struct llama_hparams {
 
     uint32_t n_pos_per_embd() const;
 
+    // Block skip connection
+    bool n_bskcn(uint32_t n, uint32_t il) const;
+
     bool is_swa(uint32_t il) const;
 
     bool has_kv(uint32_t il) const;
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index ca2ea2461..8916a6242 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -466,7 +466,7 @@ namespace GGUFMeta {
     template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
     template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);
     template bool llama_model_loader::get_key_or_arr<std::array<float, 512>>(enum llm_kv kid, std::array<float, 512> & result, uint32_t n, bool required);
-
+    template bool llama_model_loader::get_key_or_arr<uint32_t>(const std::string & key, std::array<uint32_t, 512> & result, uint32_t n, bool required);
 
 llama_model_loader::llama_model_loader(
         const std::string & fname,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index ae8207ee1..00cd579e0 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1995,6 +1995,21 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     default: type = LLM_TYPE_UNKNOWN;
                 }
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                for (size_t i = 0; i < hparams.n_bskcn_arr.max_size(); ++i) {
+                    auto & bskcn = hparams.n_bskcn_arr[i];
+                    bskcn.fill(0);
+                    auto kv = LLM_KV(arch);
+                    ml.get_key_or_arr(format((kv(LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION) + ".%d").c_str(), i), bskcn, hparams.n_layer, false);
+                }
+
+                switch (hparams.n_layer) {
+                    case 64: type = LLM_TYPE_22B; break;
+                    default: type = LLM_TYPE_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_WAVTOKENIZER_DEC:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
@@ -5429,6 +5444,34 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                         layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
 
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_SOLAR:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    {
+                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                        output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.bskcn_tv = create_tensor(tn(LLM_TENSOR_BSKCN_TV, "weight", i), {2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
                         layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
                         layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
                         layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
@@ -7534,6 +7577,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
             {
                 llm = std::make_unique<llm_build_chameleon>(*this, params);
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                llm = std::make_unique<llm_build_solar>(*this, params);
+            } break;
         case LLM_ARCH_WAVTOKENIZER_DEC:
             {
                 llm = std::make_unique<llm_build_wavtokenizer_dec>(*this, params);
@@ -7798,6 +7845,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
         case LLM_ARCH_GRANITE_MOE:
         case LLM_ARCH_GRANITE_HYBRID:
        case LLM_ARCH_CHAMELEON:
+        case LLM_ARCH_SOLAR:
         case LLM_ARCH_BAILINGMOE:
         case LLM_ARCH_NEO_BERT:
         case LLM_ARCH_SMOLLM3:
diff --git a/src/llama-model.h b/src/llama-model.h
index c6eb95318..b378b23ec 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -76,6 +76,7 @@ enum llm_type {
     LLM_TYPE_15B,
     LLM_TYPE_16B,
     LLM_TYPE_20B,
+    LLM_TYPE_22B,
     LLM_TYPE_26B,
     LLM_TYPE_27B,
     LLM_TYPE_30B,
@@ -405,6 +406,8 @@ struct llama_layer {
     struct ggml_tensor * ffn_act_beta    = nullptr;
     struct ggml_tensor * ffn_act_eps     = nullptr;
 
+    struct ggml_tensor * bskcn_tv = nullptr;
+
     struct llama_layer_posnet posnet;
 
     struct llama_layer_convnext convnext;
diff --git a/src/models/models.h b/src/models/models.h
index ffb36acc6..6d84a185d 100644
--- a/src/models/models.h
+++ b/src/models/models.h
@@ -515,6 +515,11 @@ struct llm_build_smollm3 : public llm_graph_context {
     llm_build_smollm3(const llama_model & model, const llm_graph_params & params);
 };
 
+struct llm_build_solar : public llm_graph_context {
+    llm_build_solar(const llama_model & model, const llm_graph_params & params);
+};
+
+
 struct llm_build_stablelm : public llm_graph_context {
     llm_build_stablelm(const llama_model & model, const llm_graph_params & params);
 };
diff --git a/src/models/solar.cpp b/src/models/solar.cpp
new file mode 100644
index 000000000..97383928c
--- /dev/null
+++ b/src/models/solar.cpp
@@ -0,0 +1,158 @@
+#include "models.h"
+
+llm_build_solar::llm_build_solar(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = build_inp_embd(model.tok_embd);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        auto * inp_attn = build_attn_inp_kv();
+
+        const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+        struct ggml_tensor * bskcn_1;
+        struct ggml_tensor * bskcn_2;
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            if (hparams.n_bskcn(0, il)) {
+                bskcn_1 = inpSA;
+            }
+
+            if (hparams.n_bskcn(1, il)) {
+                bskcn_2 = inpSA;
+            }
+
+            if (hparams.n_bskcn(2, il)) {
+                inpSA = ggml_add(
+                   ctx0,
+                   ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                   ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            if (hparams.n_bskcn(3, il)) {
+                inpSA = ggml_add(
+                   ctx0,
+                   ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                   ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            // norm
+            cur = build_norm(inpL,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // rope freq factors for llama3; may return nullptr for llama2 and other models
+                ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
+
+                // compute Q and K and RoPE them
+                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                   cb(Kcur, "Kcur", il);
+                }
+
+                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+                Qcur = ggml_rope_ext(
+                        ctx0, Qcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                Kcur = ggml_rope_ext(
+                        ctx0, Kcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                cur = build_attn(inp_attn,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
+                cb(cur, "attn_out", il);
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            }
+
+            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+            cur = build_norm(ffn_inp,
+                    model.layers[il].ffn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "ffn_norm", il);
+
+            cur = build_ffn(cur,
+                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
+                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                    NULL,
+                    LLM_FFN_SILU, LLM_FFN_PAR, il);
+            cb(cur, "ffn_out", il);
+
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);
+
+            cur = build_cvec(cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = build_norm(cur,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, -1);
+
+        cb(cur, "result_norm", -1);
+        res->t_embd = cur;
+
+        // lm_head
+        cur = build_lora_mm(model.output, cur);
+
+        cb(cur, "result_output", -1);
+        res->t_logits = cur;
+
+        ggml_build_forward_expand(gf, cur);
+}