From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Sun, 20 Apr 2025 16:11:09 -0700
Subject: [PATCH] solar-pro

adds support for the Solar Pro architecture
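
For reviewers, a condensed note on the mechanism: Solar Pro's "block skip
connection" (bskcn) lets selected layers stash their input, and later layers
blend that stashed tensor back into their own input, weighted by the two
scalars in the per-layer blk.%d.bskcn_tv tensor. Which layers stash and which
blend is driven by up to four per-layer flag arrays read from the
%s.attention.block_skip_connection.%d keys. Below is a minimal standalone
sketch of that blend in plain C++ (not ggml code, and the numbers are made-up
placeholders); it mirrors the ggml_mul/ggml_add pattern in llm_build_solar.

#include <cstdio>
#include <vector>

int main() {
    // input captured at an earlier layer (bskcn_1 / bskcn_2 in the graph)
    std::vector<float> saved = {1.0f, 2.0f, 3.0f};
    // current layer input (inpSA in the graph)
    std::vector<float> cur = {0.5f, 0.5f, 0.5f};
    // the two learned mixing weights, i.e. bskcn_tv[0] and bskcn_tv[1];
    // placeholder values here, the real ones are loaded from the GGUF
    const float tv[2] = {0.25f, 0.75f};

    for (size_t i = 0; i < cur.size(); ++i) {
        cur[i] = tv[0] * saved[i] + tv[1] * cur[i];
    }
    for (float v : cur) {
        printf("%f\n", v);
    }
    return 0;
}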
---
 src/llama-arch.cpp         |  21 ++++
 src/llama-arch.h           |   3 +
 src/llama-hparams.cpp      |   8 ++
 src/llama-hparams.h        |   5 +
 src/llama-model-loader.cpp |   2 +-
 src/llama-model.cpp        | 207 +++++++++++++++++++++++++++++++++++++
 src/llama-model.h          |   3 +
 7 files changed, 248 insertions(+), 1 deletion(-)

diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 8ca769c5f..ab262ec0c 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -82,6 +82,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
     { LLM_ARCH_GRANITE_HYBRID,   "granitehybrid"    },
     { LLM_ARCH_CHAMELEON,        "chameleon"        },
+    { LLM_ARCH_SOLAR,            "solar"            },
     { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
     { LLM_ARCH_PLM,              "plm"              },
     { LLM_ARCH_BAILINGMOE,       "bailingmoe"       },
@@ -183,6 +184,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_ATTENTION_SCALE,                        "%s.attention.scale"                        },
     { LLM_KV_ATTENTION_OUTPUT_SCALE,                 "%s.attention.output_scale"                 },
     { LLM_KV_ATTENTION_TEMPERATURE_LENGTH,           "%s.attention.temperature_length"           },
+    { LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,        "%s.attention.block_skip_connection"        },
     { LLM_KV_ATTENTION_KEY_LENGTH_MLA,               "%s.attention.key_length_mla"               },
     { LLM_KV_ATTENTION_VALUE_LENGTH_MLA,             "%s.attention.value_length_mla"             },
 
@@ -1901,6 +1903,24 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
         },
     },
+    {
+        LLM_ARCH_SOLAR,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_OUTPUT,          "output" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+            { LLM_TENSOR_BSKCN_TV,        "bskcn_tv" },
+        },
+    },
     {
         LLM_ARCH_WAVTOKENIZER_DEC,
         {
@@ -2469,6 +2489,7 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_LAUREL_POST_NORM,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     // this tensor is loaded for T5, but never used
     {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
+    {LLM_TENSOR_BSKCN_TV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
     {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
diff --git a/src/llama-arch.h b/src/llama-arch.h
index dea725c1a..ea2b4ffb9 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -86,6 +86,7 @@ enum llm_arch {
     LLM_ARCH_GRANITE_MOE,
     LLM_ARCH_GRANITE_HYBRID,
     LLM_ARCH_CHAMELEON,
+    LLM_ARCH_SOLAR,
     LLM_ARCH_WAVTOKENIZER_DEC,
     LLM_ARCH_PLM,
     LLM_ARCH_BAILINGMOE,
@@ -187,6 +188,7 @@ enum llm_kv {
     LLM_KV_ATTENTION_SCALE,
     LLM_KV_ATTENTION_OUTPUT_SCALE,
     LLM_KV_ATTENTION_TEMPERATURE_LENGTH,
+    LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
     LLM_KV_ATTENTION_KEY_LENGTH_MLA,
     LLM_KV_ATTENTION_VALUE_LENGTH_MLA,
 
@@ -436,6 +438,7 @@ enum llm_tensor {
     LLM_TENSOR_ENC_OUTPUT_NORM,
     LLM_TENSOR_CLS,
     LLM_TENSOR_CLS_OUT,
+    LLM_TENSOR_BSKCN_TV,
     LLM_TENSOR_CONV1D,
     LLM_TENSOR_CONVNEXT_DW,
     LLM_TENSOR_CONVNEXT_NORM,
diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp
index db65d69ea..b6bf6bbf2 100644
--- a/src/llama-hparams.cpp
+++ b/src/llama-hparams.cpp
@@ -151,6 +151,14 @@ uint32_t llama_hparams::n_pos_per_embd() const {
     return rope_type == LLAMA_ROPE_TYPE_MROPE ? 4 : 1;
 }
 
+bool llama_hparams::n_bskcn(uint32_t n, uint32_t il) const {
+    if (il < n_layer) {
+        return n_bskcn_arr[n][il] > 0;
+    }
+
+    GGML_ABORT("fatal error");
+}
+
 bool llama_hparams::is_swa(uint32_t il) const {
     if (il < n_layer) {
         return swa_layers[il];
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index 6fcf91b7d..24569a258 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -64,6 +64,8 @@ struct llama_hparams {
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
 
+    std::array<std::array<uint32_t, LLAMA_MAX_LAYERS>, 4> n_bskcn_arr = {};
+
     uint32_t n_layer_dense_lead = 0;
     uint32_t n_lora_q           = 0;
     uint32_t n_lora_kv          = 0;
@@ -250,6 +252,9 @@ struct llama_hparams {
 
     uint32_t n_pos_per_embd() const;
 
+    // Block skip connection
+    bool n_bskcn(uint32_t n, uint32_t il) const;
+
     bool is_swa(uint32_t il) const;
 
     bool has_kv(uint32_t il) const;
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index aa3a65f87..ee303bd58 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -466,7 +466,7 @@ namespace GGUFMeta {
     template bool llama_model_loader::get_key_or_arr<std::array<int, 4>>(enum llm_kv kid, std::array<int, 4> & result, uint32_t n, bool required);
     template bool llama_model_loader::get_key_or_arr<std::array<uint32_t, 512>>(enum llm_kv kid, std::array<uint32_t, 512> & result, uint32_t n, bool required);
     template bool llama_model_loader::get_key_or_arr<std::array<float, 512>>(enum llm_kv kid, std::array<float, 512> & result, uint32_t n, bool required);
-
+    template bool llama_model_loader::get_key_or_arr<uint32_t>(const std::string & key, std::array<uint32_t, 512> & result, uint32_t n, bool required);
 
 llama_model_loader::llama_model_loader(
         const std::string & fname,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 2a83d6627..54621ea39 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1890,6 +1890,21 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                     default: type = LLM_TYPE_UNKNOWN;
                }
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+                for (size_t i = 0; i < hparams.n_bskcn_arr.max_size(); ++i) {
+                    auto & bskcn = hparams.n_bskcn_arr[i];
+                    bskcn.fill(0);
+                    auto kv = LLM_KV(arch);
+                    ml.get_key_or_arr(format((kv(LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION) + ".%d").c_str(), i), bskcn, hparams.n_layer, false);
+                }
+
+                switch (hparams.n_layer) {
+                    case 64: type = LLM_TYPE_22B; break;
+                    default: type = LLM_TYPE_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_WAVTOKENIZER_DEC:
             {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
@@ -5224,6 +5239,34 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
                         layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
 
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_SOLAR:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    {
+                        output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                        output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.bskcn_tv = create_tensor(tn(LLM_TENSOR_BSKCN_TV, "weight", i), {2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
                         layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
                         layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
                         layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
@@ -16515,6 +16558,165 @@ struct llm_build_granite_hybrid : public llm_graph_context_mamba {
     }
 };
 
+struct llm_build_solar : public llm_graph_context {
+    llm_build_solar(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = build_inp_embd(model.tok_embd);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        auto * inp_attn = build_attn_inp_kv();
+
+        const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+        struct ggml_tensor * bskcn_1;
+        struct ggml_tensor * bskcn_2;
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            if (hparams.n_bskcn(0, il)) {
+                bskcn_1 = inpSA;
+            }
+
+            if (hparams.n_bskcn(1, il)) {
+                bskcn_2 = inpSA;
+            }
+
+            if (hparams.n_bskcn(2, il)) {
+                inpSA = ggml_add(
+                   ctx0,
+                   ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                   ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            if (hparams.n_bskcn(3, il)) {
+                inpSA = ggml_add(
+                   ctx0,
+                   ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                   ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            // norm
+            cur = build_norm(inpL,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // rope freq factors for llama3; may return nullptr for llama2 and other models
+                ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
+
+                // compute Q and K and RoPE them
+                ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+                Qcur = ggml_rope_ext(
+                        ctx0, Qcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                Kcur = ggml_rope_ext(
+                        ctx0, Kcur, inp_pos, rope_factors,
+                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                        ext_factor, attn_factor, beta_fast, beta_slow
+                        );
+
+                cb(Qcur, "Qcur", il);
+                cb(Kcur, "Kcur", il);
+                cb(Vcur, "Vcur", il);
+
+                cur = build_attn(inp_attn,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
+                cb(cur, "attn_out", il);
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            }
+
+            ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+            cur = build_norm(ffn_inp,
+                    model.layers[il].ffn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "ffn_norm", il);
+
+            cur = build_ffn(cur,
+                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
+                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                    NULL,
+                    LLM_FFN_SILU, LLM_FFN_PAR, il);
+            cb(cur, "ffn_out", il);
+
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);
+
+            cur = build_cvec(cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = build_norm(cur,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, -1);
+
+        cb(cur, "result_norm", -1);
+        res->t_embd = cur;
+
+        // lm_head
+        cur = build_lora_mm(model.output, cur);
+
+        cb(cur, "result_output", -1);
+        res->t_logits = cur;
+
+        ggml_build_forward_expand(gf, cur);
+    }
+};
+
 // ref: https://github.com/facebookresearch/chameleon
 // based on the original build_llama() function, changes:
 //   * qk-norm
@@ -20096,6 +20298,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
             {
                 llm = std::make_unique<llm_build_chameleon>(*this, params);
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                llm = std::make_unique<llm_build_solar>(*this, params);
+            } break;
         case LLM_ARCH_WAVTOKENIZER_DEC:
             {
                 llm = std::make_unique<llm_build_wavtokenizer_dec>(*this, params);
@@ -20331,6 +20537,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
         case LLM_ARCH_GRANITE_MOE:
         case LLM_ARCH_GRANITE_HYBRID:
         case LLM_ARCH_CHAMELEON:
+        case LLM_ARCH_SOLAR:
         case LLM_ARCH_BAILINGMOE:
         case LLM_ARCH_NEO_BERT:
         case LLM_ARCH_SMOLLM3:
diff --git a/src/llama-model.h b/src/llama-model.h
index 248f85410..4a7924aaa 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -76,6 +76,7 @@ enum llm_type {
     LLM_TYPE_15B,
     LLM_TYPE_16B,
     LLM_TYPE_20B,
+    LLM_TYPE_22B,
     LLM_TYPE_27B,
     LLM_TYPE_30B,
     LLM_TYPE_32B,
@@ -390,6 +391,8 @@ struct llama_layer {
     struct ggml_tensor * ffn_act_beta    = nullptr;
     struct ggml_tensor * ffn_act_eps     = nullptr;
 
+    struct ggml_tensor * bskcn_tv = nullptr;
+
     struct llama_layer_posnet posnet;
 
     struct llama_layer_convnext convnext;