llama-model.h
#pragma once

#include "llama.h"
#include "llama-arch.h"
#include "llama-graph.h"
#include "llama-hparams.h"
#include "llama-memory.h"
#include "llama-vocab.h"

#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

struct llama_cparams;
struct llama_ubatch;
struct llama_model_loader;

// available models
enum llm_type {
    LLM_TYPE_UNKNOWN,
    LLM_TYPE_14M,
    LLM_TYPE_17M,
    LLM_TYPE_22M,
    LLM_TYPE_33M,
    LLM_TYPE_60M,
    LLM_TYPE_70M,
    LLM_TYPE_80M,
    LLM_TYPE_109M,
    LLM_TYPE_137M,
    LLM_TYPE_140M,
    LLM_TYPE_160M,
    LLM_TYPE_190M,
    LLM_TYPE_220M,
    LLM_TYPE_250M,
    LLM_TYPE_256M,
    LLM_TYPE_270M,
    LLM_TYPE_335M,
    LLM_TYPE_350M,
    LLM_TYPE_360M,
    LLM_TYPE_410M,
    LLM_TYPE_450M,
    LLM_TYPE_475M,
    LLM_TYPE_558M,
    LLM_TYPE_700M,
    LLM_TYPE_770M,
    LLM_TYPE_780M,
    LLM_TYPE_950M,
    LLM_TYPE_0_3B,
    LLM_TYPE_0_5B,
    LLM_TYPE_0_6B,
    LLM_TYPE_1B,
    LLM_TYPE_1_2B,
    LLM_TYPE_1_3B,
    LLM_TYPE_1_4B,
    LLM_TYPE_1_5B,
    LLM_TYPE_1_6B,
    LLM_TYPE_1_7B,
    LLM_TYPE_1_8B,
    LLM_TYPE_2B,
    LLM_TYPE_2_6B,
    LLM_TYPE_2_8B,
    LLM_TYPE_2_9B,
    LLM_TYPE_3B,
    LLM_TYPE_4B,
    LLM_TYPE_6B,
    LLM_TYPE_6_9B,
    LLM_TYPE_7B,
    LLM_TYPE_8B,
    LLM_TYPE_9B,
    LLM_TYPE_11B,
    LLM_TYPE_12B,
    LLM_TYPE_13B,
    LLM_TYPE_14B,
    LLM_TYPE_15B,
    LLM_TYPE_16B,
    LLM_TYPE_20B,
    LLM_TYPE_22B,
    LLM_TYPE_26B,
    LLM_TYPE_27B,
    LLM_TYPE_30B,
    LLM_TYPE_32B,
    LLM_TYPE_34B,
    LLM_TYPE_35B,
    LLM_TYPE_36B,
    LLM_TYPE_40B,
    LLM_TYPE_65B,
    LLM_TYPE_70B,
    LLM_TYPE_120B,
    LLM_TYPE_142B,
    LLM_TYPE_236B,
    LLM_TYPE_290B,
    LLM_TYPE_314B,
    LLM_TYPE_405B,
    LLM_TYPE_671B,
    LLM_TYPE_SMALL,
    LLM_TYPE_MEDIUM,
    LLM_TYPE_LARGE,
    LLM_TYPE_XL,
    LLM_TYPE_A1_7B,
    LLM_TYPE_A2_7B,
    LLM_TYPE_8x7B,
    LLM_TYPE_8x22B,
    LLM_TYPE_16x12B,
    LLM_TYPE_16x3_8B,
    LLM_TYPE_10B_128x3_66B,
    LLM_TYPE_57B_A14B,
    LLM_TYPE_17B_16E, // llama4 Scout
    LLM_TYPE_17B_128E, // llama4 Maverick
    LLM_TYPE_A13B,
    LLM_TYPE_7B_A1B,
    LLM_TYPE_8B_A1B, // lfm2moe
    LLM_TYPE_16B_A1B,
    LLM_TYPE_21B_A3B, // Ernie MoE small
    LLM_TYPE_30B_A3B,
    LLM_TYPE_80B_A3B, // Qwen3 Next
    LLM_TYPE_100B_A6B,
    LLM_TYPE_106B_A12B, // GLM-4.5-Air
    LLM_TYPE_230B_A10B, // Minimax M2
    LLM_TYPE_235B_A22B,
    LLM_TYPE_300B_A47B, // Ernie MoE big
    LLM_TYPE_355B_A32B, // GLM-4.5
    LLM_TYPE_E2B,
    LLM_TYPE_E4B,
};

std::string llama_rope_scaling_type_name(llama_rope_scaling_type rope_scaling_type);

// PosNet / ConvNeXt blocks (used by the WavTokenizer audio decoder)
struct llama_layer_posnet {
    // resnet
    struct ggml_tensor * norm1   = nullptr;
    struct ggml_tensor * norm1_b = nullptr;

    struct ggml_tensor * conv1   = nullptr;
    struct ggml_tensor * conv1_b = nullptr;

    struct ggml_tensor * norm2   = nullptr;
    struct ggml_tensor * norm2_b = nullptr;

    struct ggml_tensor * conv2   = nullptr;
    struct ggml_tensor * conv2_b = nullptr;

    // attention
    struct ggml_tensor * attn_norm   = nullptr;
    struct ggml_tensor * attn_norm_b = nullptr;

    struct ggml_tensor * attn_q   = nullptr;
    struct ggml_tensor * attn_q_b = nullptr;

    struct ggml_tensor * attn_k   = nullptr;
    struct ggml_tensor * attn_k_b = nullptr;

    struct ggml_tensor * attn_v   = nullptr;
    struct ggml_tensor * attn_v_b = nullptr;

    struct ggml_tensor * attn_o   = nullptr;
    struct ggml_tensor * attn_o_b = nullptr;

    // normalize
    struct ggml_tensor * norm   = nullptr;
    struct ggml_tensor * norm_b = nullptr;
};

struct llama_layer_convnext {
    struct ggml_tensor * dw   = nullptr;
    struct ggml_tensor * dw_b = nullptr;

    struct ggml_tensor * norm   = nullptr;
    struct ggml_tensor * norm_b = nullptr;

    struct ggml_tensor * pw1   = nullptr;
    struct ggml_tensor * pw1_b = nullptr;

    struct ggml_tensor * pw2   = nullptr;
    struct ggml_tensor * pw2_b = nullptr;

    struct ggml_tensor * gamma = nullptr;
};

// short-convolution block (e.g. LFM2)
struct llama_layer_shortconv {
    struct ggml_tensor * in_proj  = nullptr;
    struct ggml_tensor * conv     = nullptr;
    struct ggml_tensor * out_proj = nullptr;
};

// nextn / multi-token prediction (MTP) tensors (e.g. GLM-4.5)
struct llama_layer_nextn {
    struct ggml_tensor * eh_proj          = nullptr;
    struct ggml_tensor * embed_tokens     = nullptr;
    struct ggml_tensor * enorm            = nullptr;
    struct ggml_tensor * hnorm            = nullptr;
    struct ggml_tensor * shared_head_head = nullptr;
    struct ggml_tensor * shared_head_norm = nullptr;
};

struct llama_layer {
    // normalization
    struct ggml_tensor * attn_norm       = nullptr;
    struct ggml_tensor * attn_norm_b     = nullptr;
    struct ggml_tensor * attn_norm_2     = nullptr;
    struct ggml_tensor * attn_norm_2_b   = nullptr;
    struct ggml_tensor * attn_q_norm     = nullptr;
    struct ggml_tensor * attn_q_norm_b   = nullptr;
    struct ggml_tensor * attn_k_norm     = nullptr;
    struct ggml_tensor * attn_k_norm_b   = nullptr;
    struct ggml_tensor * attn_out_norm   = nullptr;
    struct ggml_tensor * attn_out_norm_b = nullptr;
    struct ggml_tensor * attn_q_a_norm   = nullptr;
    struct ggml_tensor * attn_kv_a_norm  = nullptr;
    struct ggml_tensor * attn_sub_norm   = nullptr;
    struct ggml_tensor * attn_post_norm  = nullptr;
    struct ggml_tensor * ffn_sub_norm    = nullptr;
    struct ggml_tensor * attn_norm_cross = nullptr;
    struct ggml_tensor * attn_norm_enc   = nullptr;
    struct ggml_tensor * ssm_norm        = nullptr;
    struct ggml_tensor * ssm_dt_norm     = nullptr;
    struct ggml_tensor * ssm_b_norm      = nullptr;
    struct ggml_tensor * ssm_c_norm      = nullptr;

    // attention
    struct ggml_tensor * wq        = nullptr;
    struct ggml_tensor * wk        = nullptr;
    struct ggml_tensor * wv        = nullptr;
    struct ggml_tensor * wo        = nullptr;
    struct ggml_tensor * wqkv      = nullptr;
    struct ggml_tensor * wq_a      = nullptr;
    struct ggml_tensor * wq_b      = nullptr;
    struct ggml_tensor * wkv_a_mqa = nullptr;
    struct ggml_tensor * wkv_b     = nullptr;
    struct ggml_tensor * wk_b      = nullptr;
    struct ggml_tensor * wv_b      = nullptr;
    struct ggml_tensor * wq_cross  = nullptr;
    struct ggml_tensor * wk_cross  = nullptr;
    struct ggml_tensor * wv_cross  = nullptr;
    struct ggml_tensor * wo_cross  = nullptr;
    struct ggml_tensor * wq_enc    = nullptr;
    struct ggml_tensor * wk_enc    = nullptr;
    struct ggml_tensor * wv_enc    = nullptr;
    struct ggml_tensor * wo_enc    = nullptr;
    struct ggml_tensor * wqkv_gate = nullptr;

    // attention bias
    struct ggml_tensor * bq   = nullptr;
    struct ggml_tensor * bk   = nullptr;
    struct ggml_tensor * bv   = nullptr;
    struct ggml_tensor * bo   = nullptr;
    struct ggml_tensor * bqkv = nullptr;

    // relative position bias
    struct ggml_tensor * attn_rel_b       = nullptr;
    struct ggml_tensor * attn_rel_b_enc   = nullptr;
    struct ggml_tensor * attn_rel_b_cross = nullptr;

    // normalization
    struct ggml_tensor * ffn_norm         = nullptr;
    struct ggml_tensor * ffn_norm_b       = nullptr;
    struct ggml_tensor * ffn_post_norm    = nullptr;
    struct ggml_tensor * layer_out_norm   = nullptr;
    struct ggml_tensor * layer_out_norm_b = nullptr;
    struct ggml_tensor * ffn_norm_exps    = nullptr;
    struct ggml_tensor * ffn_norm_enc     = nullptr;

    // ff
    struct ggml_tensor * ffn_gate     = nullptr; // w1
    struct ggml_tensor * ffn_down     = nullptr; // w2
    struct ggml_tensor * ffn_up       = nullptr; // w3
    struct ggml_tensor * ffn_gate_enc = nullptr;
    struct ggml_tensor * ffn_down_enc = nullptr;
    struct ggml_tensor * ffn_up_enc   = nullptr;

    // ff MoE
    struct ggml_tensor * ffn_gate_inp    = nullptr;
    struct ggml_tensor * ffn_gate_exps   = nullptr;
    struct ggml_tensor * ffn_down_exps   = nullptr;
    struct ggml_tensor * ffn_up_exps     = nullptr;
    struct ggml_tensor * ffn_gate_inp_b  = nullptr;
    struct ggml_tensor * ffn_gate_exps_b = nullptr;
    struct ggml_tensor * ffn_down_exps_b = nullptr;
    struct ggml_tensor * ffn_up_exps_b   = nullptr;

    // ff shared expert (shexp)
    struct ggml_tensor * ffn_gate_inp_shexp = nullptr;
    struct ggml_tensor * ffn_gate_shexp     = nullptr;
    struct ggml_tensor * ffn_down_shexp     = nullptr;
    struct ggml_tensor * ffn_up_shexp       = nullptr;

    // ff adjugate experts (chexps)
    struct ggml_tensor * ffn_gate_chexps     = nullptr;
    struct ggml_tensor * ffn_down_chexps     = nullptr;
    struct ggml_tensor * ffn_up_chexps       = nullptr;

    // ff bias
    struct ggml_tensor * ffn_gate_b = nullptr;
    struct ggml_tensor * ffn_down_b = nullptr; // b2
    struct ggml_tensor * ffn_up_b   = nullptr; // b3
    struct ggml_tensor * ffn_act    = nullptr;
    struct ggml_tensor * ffn_exp_probs_b = nullptr;

    // mamba proj
    struct ggml_tensor * ssm_in  = nullptr;
    struct ggml_tensor * ssm_x   = nullptr;
    struct ggml_tensor * ssm_dt  = nullptr;
    struct ggml_tensor * ssm_out = nullptr;

    // mamba
    struct ggml_tensor * ssm_conv1d = nullptr;
    struct ggml_tensor * ssm_a      = nullptr;
    struct ggml_tensor * ssm_d      = nullptr;

    // mamba bias
    struct ggml_tensor * ssm_conv1d_b = nullptr;
    struct ggml_tensor * ssm_dt_b     = nullptr;

    // qwen3next
    struct ggml_tensor * ssm_beta_alpha = nullptr;

    // rwkv
    struct ggml_tensor * time_mix_w1         = nullptr;
    struct ggml_tensor * time_mix_w2         = nullptr;
    struct ggml_tensor * time_mix_lerp_x     = nullptr;
    struct ggml_tensor * time_mix_lerp_w     = nullptr;
    struct ggml_tensor * time_mix_lerp_k     = nullptr;
    struct ggml_tensor * time_mix_lerp_v     = nullptr;
    struct ggml_tensor * time_mix_lerp_r     = nullptr;
    struct ggml_tensor * time_mix_lerp_g     = nullptr;
    struct ggml_tensor * time_mix_lerp_fused = nullptr;

    struct ggml_tensor * time_mix_first        = nullptr;
    struct ggml_tensor * time_mix_decay        = nullptr;
    struct ggml_tensor * time_mix_decay_w1     = nullptr;
    struct ggml_tensor * time_mix_decay_w2     = nullptr;
    struct ggml_tensor * time_mix_key          = nullptr;
    struct ggml_tensor * time_mix_key_b        = nullptr;
    struct ggml_tensor * time_mix_value        = nullptr;
    struct ggml_tensor * time_mix_value_b      = nullptr;
    struct ggml_tensor * time_mix_receptance   = nullptr;
    struct ggml_tensor * time_mix_receptance_b = nullptr;
    struct ggml_tensor * time_mix_gate         = nullptr;

    // rwkv7
    struct ggml_tensor * time_mix_w0         = nullptr;
    struct ggml_tensor * time_mix_a0         = nullptr;
    struct ggml_tensor * time_mix_a1         = nullptr;
    struct ggml_tensor * time_mix_a2         = nullptr;
    struct ggml_tensor * time_mix_v0         = nullptr;
    struct ggml_tensor * time_mix_v1         = nullptr;
    struct ggml_tensor * time_mix_v2         = nullptr;
    struct ggml_tensor * time_mix_g1         = nullptr;
    struct ggml_tensor * time_mix_g2         = nullptr;
    struct ggml_tensor * time_mix_k_k        = nullptr;
    struct ggml_tensor * time_mix_k_a        = nullptr;
    struct ggml_tensor * time_mix_r_k        = nullptr;

    struct ggml_tensor * time_mix_ln     = nullptr;
    struct ggml_tensor * time_mix_ln_b   = nullptr;
    struct ggml_tensor * time_mix_output = nullptr;

    struct ggml_tensor * channel_mix_lerp_k = nullptr;
    struct ggml_tensor * channel_mix_lerp_r = nullptr;

    struct ggml_tensor * channel_mix_key        = nullptr;
    struct ggml_tensor * channel_mix_receptance = nullptr;
    struct ggml_tensor * channel_mix_value      = nullptr;

    // long rope factors
    struct ggml_tensor * rope_long  = nullptr;
    struct ggml_tensor * rope_short = nullptr;
    struct ggml_tensor * rope_freqs = nullptr;

    // bitnet scale
    struct ggml_tensor * wq_scale       = nullptr;
    struct ggml_tensor * wk_scale       = nullptr;
    struct ggml_tensor * wv_scale       = nullptr;
    struct ggml_tensor * wo_scale       = nullptr;
    struct ggml_tensor * ffn_gate_scale = nullptr;
    struct ggml_tensor * ffn_up_scale   = nullptr;
    struct ggml_tensor * ffn_down_scale = nullptr;

    // altup & laurel
    struct ggml_tensor * per_layer_inp_gate   = nullptr;
    struct ggml_tensor * per_layer_proj       = nullptr;
    struct ggml_tensor * per_layer_post_norm  = nullptr;
    struct ggml_tensor * altup_correct_coef   = nullptr;
    struct ggml_tensor * altup_correct_scale  = nullptr;
    struct ggml_tensor * altup_predict_coef   = nullptr;
    struct ggml_tensor * altup_router         = nullptr;
    struct ggml_tensor * altup_router_norm    = nullptr;
    struct ggml_tensor * laurel_l             = nullptr;
    struct ggml_tensor * laurel_r             = nullptr;
    struct ggml_tensor * laurel_post_norm     = nullptr;

    // openai-moe
    struct ggml_tensor * attn_sinks = nullptr;

    // cogvlm
    struct ggml_tensor * visexp_attn_wqkv = nullptr;
    struct ggml_tensor * visexp_attn_wo   = nullptr;
    struct ggml_tensor * visexp_ffn_gate  = nullptr;
    struct ggml_tensor * visexp_ffn_down  = nullptr;
    struct ggml_tensor * visexp_ffn_up    = nullptr;

    // xIELU activation parameters for Apertus
    struct ggml_tensor * ffn_act_alpha_n = nullptr;
    struct ggml_tensor * ffn_act_alpha_p = nullptr;
    struct ggml_tensor * ffn_act_beta    = nullptr;
    struct ggml_tensor * ffn_act_eps     = nullptr;

    struct ggml_tensor * bskcn_tv = nullptr;

    struct llama_layer_posnet posnet;

    struct llama_layer_convnext convnext;

    struct llama_layer_shortconv shortconv;

    struct llama_layer_nextn nextn;
};
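
// Illustrative sketch (not part of this header): the per-layer weights above are
// consumed by the graph builders in llama-model.cpp. A SwiGLU FFN step, for
// example, looks roughly like this (ctx0 and cur are the builder's locals):
//
//   ggml_tensor * gate = ggml_mul_mat(ctx0, layer.ffn_gate, cur); // w1
//   ggml_tensor * up   = ggml_mul_mat(ctx0, layer.ffn_up,   cur); // w3
//   cur = ggml_mul(ctx0, ggml_silu(ctx0, gate), up);
//   cur = ggml_mul_mat(ctx0, layer.ffn_down, cur);                // w2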

struct llama_model {
    llm_type type = LLM_TYPE_UNKNOWN;
    llm_arch arch = LLM_ARCH_UNKNOWN;

    std::string name = "n/a";

    llama_hparams hparams = {};
    llama_vocab   vocab;

    // for classifier models
    std::vector<std::string> classifier_labels;

    struct ggml_tensor * tok_embd   = nullptr;
    struct ggml_tensor * type_embd  = nullptr;
    struct ggml_tensor * pos_embd   = nullptr;
    struct ggml_tensor * tok_norm   = nullptr;
    struct ggml_tensor * tok_norm_b = nullptr;

    struct ggml_tensor * output_norm     = nullptr;
    struct ggml_tensor * output_norm_b   = nullptr;
    struct ggml_tensor * output          = nullptr;
    struct ggml_tensor * output_b        = nullptr;
    struct ggml_tensor * output_norm_enc = nullptr;

    // classifier
    struct ggml_tensor * cls       = nullptr;
    struct ggml_tensor * cls_b     = nullptr;
    struct ggml_tensor * cls_out   = nullptr;
    struct ggml_tensor * cls_out_b = nullptr;

    struct ggml_tensor * conv1d   = nullptr;
    struct ggml_tensor * conv1d_b = nullptr;

    // gemma3n altup
    struct ggml_tensor * tok_embd_per_layer   = nullptr;
    struct ggml_tensor * altup_proj           = nullptr;
    struct ggml_tensor * altup_unembd_proj    = nullptr;
    struct ggml_tensor * per_layer_model_proj = nullptr;
    struct ggml_tensor * per_layer_proj_norm  = nullptr;

    std::vector<llama_layer> layers;

    // dense linear projections for SentenceTransformers models such as embeddinggemma;
    // for the structure of SentenceTransformers models, see
    // https://sbert.net/docs/sentence_transformer/usage/custom_models.html#structure-of-sentence-transformer-models
    struct ggml_tensor * dense_2_out_layers = nullptr;
    struct ggml_tensor * dense_3_out_layers = nullptr;

    llama_model_params params;

    // gguf metadata
    std::unordered_map<std::string, std::string> gguf_kv;

    // list of devices used in this model
    std::vector<ggml_backend_dev_t> devices;

    // for quantize-stats only
    std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;

    int64_t t_load_us  = 0;
    int64_t t_start_us = 0;

    explicit llama_model(const struct llama_model_params & params);
    ~llama_model();

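    // loading happens in stages; a typical driver (cf. llama_model_load in
    // llama.cpp) calls these roughly in the order below:
    //
    //   model.load_arch   (ml);
    //   model.load_hparams(ml);
    //   model.load_vocab  (ml);
    //   model.load_stats  (ml);
    //   if (!model.load_tensors(ml)) { /* cancelled by progress_callback */ }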
    void load_stats  (llama_model_loader & ml);
    void load_arch   (llama_model_loader & ml);
    void load_hparams(llama_model_loader & ml);
    void load_vocab  (llama_model_loader & ml);
    bool load_tensors(llama_model_loader & ml); // returns false if cancelled by progress_callback

    std::string arch_name() const;
    std::string type_name() const;

    std::string desc() const;

    size_t size() const; // file size
    size_t n_tensors() const;
    size_t n_devices() const;

    std::map<ggml_backend_buffer_type_t, size_t> memory_breakdown() const;
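    // e.g. to log the breakdown (sketch; LLAMA_LOG_INFO is from llama-impl.h):
    //   for (const auto & [buft, size] : model.memory_breakdown()) {
    //       LLAMA_LOG_INFO("%s: %zu bytes\n", ggml_backend_buft_name(buft), size);
    //   }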

    // total number of parameters in the model
    uint64_t n_elements() const;

    void print_info() const;

    ggml_backend_dev_t dev_layer(int il) const;
    ggml_backend_dev_t dev_output() const;

    ggml_backend_buffer_type_t select_buft(int il) const;

    bool has_tensor_overrides() const;

    const struct ggml_tensor * get_tensor(const char * name) const;

    float get_rope_freq_base (const llama_cparams & cparams, int il) const;
    float get_rope_freq_scale(const llama_cparams & cparams, int il) const;

    ggml_tensor * get_rope_factors(const llama_cparams & cparams, int il) const;
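    // sketch of typical use inside a graph builder, per layer il (parameter
    // names follow the usual llama.cpp locals):
    //   ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
    //   cur = ggml_rope_ext(ctx0, cur, inp_pos, rope_factors,
    //                       n_rot, rope_type, n_ctx_orig,
    //                       freq_base, freq_scale, ext_factor, attn_factor,
    //                       beta_fast, beta_slow);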

    // TODO: move this to new llm_arch_model_i interface
    llama_memory_i * create_memory(const llama_memory_params & params, const llama_cparams & cparams) const;

    // TODO: move this to new llm_arch_model_i interface
    ggml_cgraph * build_graph(const llm_graph_params & params) const;
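    // sketch: a llama_context rebuilds its compute graph per ubatch via
    //   ggml_cgraph * gf = model.build_graph(gparams);
    // (gparams: a populated llm_graph_params)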

private:
    struct impl;
    std::unique_ptr<impl> pimpl;
};

const char * llm_type_name(llm_type type);
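
// Example (sketch): once a model has been populated by the loader, its
// identity can be reported as:
//
//   llama_model model(llama_model_default_params());
//   ...
//   LLAMA_LOG_INFO("arch = %s, type = %s\n",
//                  model.arch_name().c_str(), llm_type_name(model.type));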

// For internal test use
// TODO: remove
const std::vector<std::pair<std::string, ggml_tensor *>> & llama_internal_get_tensor_map(const llama_model * model);