// llama-arch.h — model architecture enums and GGUF constant definitions
#pragma once

#include "ggml.h" // ggml_op

#include <string>
#include <set>

//
// gguf constants (sync with gguf.py)
//

// Supported model architectures.
// NOTE: new entries must be appended in a backward-compatible way — the
// enumerator order (and therefore the implicit values) is relied upon by
// the name tables in the corresponding .cpp file.
enum llm_arch {
    LLM_ARCH_CLIP,
    LLM_ARCH_LLAMA,
    LLM_ARCH_LLAMA4,
    LLM_ARCH_DECI,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GROK,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_NOMIC_BERT_MOE,
    LLM_ARCH_NEO_BERT,
    LLM_ARCH_JINA_BERT_V2,
    LLM_ARCH_JINA_BERT_V3,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_QWEN2MOE,
    LLM_ARCH_QWEN2VL,
    LLM_ARCH_QWEN3,
    LLM_ARCH_QWEN3MOE,
    LLM_ARCH_QWEN3NEXT,
    LLM_ARCH_QWEN3VL,
    LLM_ARCH_QWEN3VLMOE,
    LLM_ARCH_PHI2,
    LLM_ARCH_PHI3,
    LLM_ARCH_PHIMOE,
    LLM_ARCH_PLAMO,
    LLM_ARCH_PLAMO2,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_MINICPM3,
    LLM_ARCH_GEMMA,
    LLM_ARCH_GEMMA2,
    LLM_ARCH_GEMMA3,
    LLM_ARCH_GEMMA3N,
    LLM_ARCH_GEMMA_EMBEDDING,
    LLM_ARCH_STARCODER2,
    LLM_ARCH_MAMBA,
    LLM_ARCH_MAMBA2,
    LLM_ARCH_JAMBA,
    LLM_ARCH_FALCON_H1,
    LLM_ARCH_XVERSE,
    LLM_ARCH_COMMAND_R,
    LLM_ARCH_COHERE2,
    LLM_ARCH_DBRX,
    LLM_ARCH_OLMO,
    LLM_ARCH_OLMO2,
    LLM_ARCH_OLMOE,
    LLM_ARCH_OPENELM,
    LLM_ARCH_ARCTIC,
    LLM_ARCH_DEEPSEEK,
    LLM_ARCH_DEEPSEEK2,
    LLM_ARCH_CHATGLM,
    LLM_ARCH_GLM4,
    LLM_ARCH_GLM4_MOE,
    LLM_ARCH_BITNET,
    LLM_ARCH_T5,
    LLM_ARCH_T5ENCODER,
    LLM_ARCH_JAIS,
    LLM_ARCH_NEMOTRON,
    LLM_ARCH_NEMOTRON_H,
    LLM_ARCH_NEMOTRON_H_MOE,
    LLM_ARCH_EXAONE,
    LLM_ARCH_EXAONE4,
    LLM_ARCH_RWKV6,
    LLM_ARCH_RWKV6QWEN2,
    LLM_ARCH_RWKV7,
    LLM_ARCH_ARWKV7,
    LLM_ARCH_GRANITE,
    LLM_ARCH_GRANITE_MOE,
    LLM_ARCH_GRANITE_HYBRID,
    LLM_ARCH_CHAMELEON,
    LLM_ARCH_SOLAR,
    LLM_ARCH_WAVTOKENIZER_DEC,
    LLM_ARCH_PLM,
    LLM_ARCH_BAILINGMOE,
    LLM_ARCH_BAILINGMOE2,
    LLM_ARCH_DOTS1,
    LLM_ARCH_ARCEE,
    LLM_ARCH_AFMOE,
    LLM_ARCH_ERNIE4_5,
    LLM_ARCH_ERNIE4_5_MOE,
    LLM_ARCH_HUNYUAN_MOE,
    LLM_ARCH_HUNYUAN_DENSE,
    LLM_ARCH_SMOLLM3,
    LLM_ARCH_OPENAI_MOE,
    LLM_ARCH_LFM2,
    LLM_ARCH_LFM2MOE,
    LLM_ARCH_DREAM,
    LLM_ARCH_SMALLTHINKER,
    LLM_ARCH_LLADA,
    LLM_ARCH_LLADA_MOE,
    LLM_ARCH_SEED_OSS,
    LLM_ARCH_GROVEMOE,
    LLM_ARCH_APERTUS,
    LLM_ARCH_MINIMAX_M2,
    LLM_ARCH_COGVLM,
    LLM_ARCH_RND1,
    LLM_ARCH_PANGU_EMBED,
    LLM_ARCH_MISTRAL3,
    LLM_ARCH_UNKNOWN, // sentinel for unrecognized architectures
};

// GGUF metadata keys (kept in sync with gguf.py).
// Enumerator order must be preserved — the key-string table in the .cpp
// file is indexed by these values.
enum llm_kv {
    // general.* metadata
    LLM_KV_GENERAL_TYPE,
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_FILE_TYPE,
    LLM_KV_GENERAL_SAMPLING_SEQUENCE,
    LLM_KV_GENERAL_SAMPLING_TOP_K,
    LLM_KV_GENERAL_SAMPLING_TOP_P,
    LLM_KV_GENERAL_SAMPLING_MIN_P,
    LLM_KV_GENERAL_SAMPLING_XTC_PROBABILITY,
    LLM_KV_GENERAL_SAMPLING_XTC_THRESHOLD,
    LLM_KV_GENERAL_SAMPLING_TEMP,
    LLM_KV_GENERAL_SAMPLING_PENALTY_LAST_N,
    LLM_KV_GENERAL_SAMPLING_PENALTY_REPEAT,
    LLM_KV_GENERAL_SAMPLING_MIROSTAT,
    LLM_KV_GENERAL_SAMPLING_MIROSTAT_TAU,
    LLM_KV_GENERAL_SAMPLING_MIROSTAT_ETA,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_VERSION,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,

    // model hyperparameters
    LLM_KV_VOCAB_SIZE,
    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_FEATURES_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_LEADING_DENSE_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_EXPERT_FEED_FORWARD_LENGTH,
    LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH,
    LLM_KV_EXPERT_CHUNK_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_EXPERT_COUNT,
    LLM_KV_EXPERT_USED_COUNT,
    LLM_KV_EXPERT_SHARED_COUNT,
    LLM_KV_EXPERT_GROUP_COUNT,
    LLM_KV_EXPERT_GROUP_USED_COUNT,
    LLM_KV_EXPERT_WEIGHTS_SCALE,
    LLM_KV_EXPERT_WEIGHTS_NORM,
    LLM_KV_EXPERT_GATING_FUNC,
    LLM_KV_EXPERT_GROUP_SCALE,
    LLM_KV_EXPERTS_PER_GROUP,
    LLM_KV_MOE_EVERY_N_LAYERS,
    LLM_KV_NEXTN_PREDICT_LAYERS,
    LLM_KV_NUM_DEEPSTACK_LAYERS,
    LLM_KV_POOLING_TYPE,
    LLM_KV_LOGIT_SCALE,
    LLM_KV_DECODER_START_TOKEN_ID,
    LLM_KV_DECODER_BLOCK_COUNT,
    LLM_KV_ATTN_LOGIT_SOFTCAPPING,
    LLM_KV_ROUTER_LOGIT_SOFTCAPPING,
    LLM_KV_FINAL_LOGIT_SOFTCAPPING,
    LLM_KV_SWIN_NORM,
    LLM_KV_RESCALE_EVERY_N_LAYERS,
    LLM_KV_TIME_MIX_EXTRA_DIM,
    LLM_KV_TIME_DECAY_EXTRA_DIM,
    LLM_KV_RESIDUAL_SCALE,
    LLM_KV_EMBEDDING_SCALE,
    LLM_KV_TOKEN_SHIFT_COUNT,
    LLM_KV_INTERLEAVE_MOE_LAYER_STEP,

    // attention.* hyperparameters
    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_KEY_LENGTH,
    LLM_KV_ATTENTION_VALUE_LENGTH,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
    LLM_KV_ATTENTION_GROUPNORM_EPS,
    LLM_KV_ATTENTION_GROUPNORM_GROUPS,
    LLM_KV_ATTENTION_CAUSAL,
    LLM_KV_ATTENTION_Q_LORA_RANK,
    LLM_KV_ATTENTION_KV_LORA_RANK,
    LLM_KV_ATTENTION_DECAY_LORA_RANK,
    LLM_KV_ATTENTION_ICLR_LORA_RANK,
    LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK,
    LLM_KV_ATTENTION_GATE_LORA_RANK,
    LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
    LLM_KV_ATTENTION_SLIDING_WINDOW,
    LLM_KV_ATTENTION_SCALE,
    LLM_KV_ATTENTION_OUTPUT_SCALE,
    LLM_KV_ATTENTION_TEMPERATURE_LENGTH,
    LLM_KV_ATTENTION_TEMPERATURE_SCALE,
    LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
    LLM_KV_ATTENTION_KEY_LENGTH_MLA,
    LLM_KV_ATTENTION_VALUE_LENGTH_MLA,

    // rope.* hyperparameters
    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_DIMENSION_SECTIONS,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ATTN_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,
    LLM_KV_ROPE_SCALING_YARN_LOG_MUL,
    LLM_KV_ROPE_SCALING_YARN_EXT_FACTOR,
    LLM_KV_ROPE_SCALING_YARN_ATTN_FACTOR,
    LLM_KV_ROPE_SCALING_YARN_BETA_FAST,
    LLM_KV_ROPE_SCALING_YARN_BETA_SLOW,

    // split.* (multi-file models)
    LLM_KV_SPLIT_NO,
    LLM_KV_SPLIT_COUNT,
    LLM_KV_SPLIT_TENSORS_COUNT,

    // ssm.* (state-space models)
    LLM_KV_SSM_INNER_SIZE,
    LLM_KV_SSM_CONV_KERNEL,
    LLM_KV_SSM_STATE_SIZE,
    LLM_KV_SSM_TIME_STEP_RANK,
    LLM_KV_SSM_GROUP_COUNT,
    LLM_KV_SSM_DT_B_C_RMS,

    LLM_KV_WKV_HEAD_SIZE,

    // tokenizer.* metadata
    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_PRE,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_EOT_ID,
    LLM_KV_TOKENIZER_EOM_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_CLS_ID,
    LLM_KV_TOKENIZER_MASK_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_ADD_SEP,
    LLM_KV_TOKENIZER_ADD_PREFIX,
    LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
    LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
    LLM_KV_TOKENIZER_CHAT_TEMPLATE,
    LLM_KV_TOKENIZER_FIM_PRE_ID,
    LLM_KV_TOKENIZER_FIM_SUF_ID,
    LLM_KV_TOKENIZER_FIM_MID_ID,
    LLM_KV_TOKENIZER_FIM_PAD_ID,
    LLM_KV_TOKENIZER_FIM_REP_ID,
    LLM_KV_TOKENIZER_FIM_SEP_ID,

    // adapter.* (LoRA) metadata
    LLM_KV_ADAPTER_TYPE,
    LLM_KV_ADAPTER_LORA_ALPHA,
    LLM_KV_ADAPTER_LORA_TASK_NAME,
    LLM_KV_ADAPTER_LORA_PROMPT_PREFIX,
    LLM_KV_ADAPTER_ALORA_INVOCATION_TOKENS,

    LLM_KV_POSNET_EMBEDDING_LENGTH,
    LLM_KV_POSNET_BLOCK_COUNT,

    LLM_KV_CONVNEXT_EMBEDDING_LENGTH,
    LLM_KV_CONVNEXT_BLOCK_COUNT,

    LLM_KV_CLASSIFIER_OUTPUT_LABELS,

    LLM_KV_SHORTCONV_L_CACHE,

    LLM_KV_XIELU_ALPHA_N,
    LLM_KV_XIELU_ALPHA_P,
    LLM_KV_XIELU_BETA,
    LLM_KV_XIELU_EPS,

    // deprecated:
    LLM_KV_TOKENIZER_PREFIX_ID,
    LLM_KV_TOKENIZER_SUFFIX_ID,
    LLM_KV_TOKENIZER_MIDDLE_ID,

    // sentence-transformers dense layers in and out features
    LLM_KV_DENSE_2_FEAT_IN,
    LLM_KV_DENSE_2_FEAT_OUT,
    LLM_KV_DENSE_3_FEAT_IN,
    LLM_KV_DENSE_3_FEAT_OUT,
};

// Tensor identifiers used to build GGUF tensor names.
// Enumerator order must be preserved — it indexes the name/info tables in
// the corresponding .cpp file.
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_TOKEN_TYPES,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_DENSE_2_OUT,
    LLM_TENSOR_DENSE_3_OUT,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_OUTPUT_NORM_LFM2, // fix for wrong tensor name
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ROPE_FACTORS_LONG,
    LLM_TENSOR_ROPE_FACTORS_SHORT,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_OUT_NORM,
    LLM_TENSOR_ATTN_POST_NORM,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_ATTN_SINKS,
    LLM_TENSOR_ATTN_GATE,
    LLM_TENSOR_FFN_GATE_INP,
    LLM_TENSOR_FFN_GATE_INP_SHEXP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_FFN_POST_NORM,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_ACT,
    LLM_TENSOR_FFN_DOWN_EXP,  // split experts for backward compatibility
    LLM_TENSOR_FFN_GATE_EXP,
    LLM_TENSOR_FFN_UP_EXP,
    LLM_TENSOR_FFN_NORM_EXPS,
    LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
    LLM_TENSOR_FFN_GATE_EXPS,
    LLM_TENSOR_FFN_UP_EXPS,
    LLM_TENSOR_FFN_DOWN_SHEXP,
    LLM_TENSOR_FFN_GATE_SHEXP,
    LLM_TENSOR_FFN_UP_SHEXP,
    LLM_TENSOR_FFN_DOWN_CHEXPS,
    LLM_TENSOR_FFN_GATE_CHEXPS,
    LLM_TENSOR_FFN_UP_CHEXPS,
    LLM_TENSOR_FFN_EXP_PROBS_B,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
    LLM_TENSOR_LAYER_OUT_NORM,
    LLM_TENSOR_POST_ATTN_NORM,
    LLM_TENSOR_POST_MLP_NORM,
    LLM_TENSOR_PER_LAYER_TOKEN_EMBD, // gemma3n
    LLM_TENSOR_PER_LAYER_MODEL_PROJ, // gemma3n
    LLM_TENSOR_PER_LAYER_INP_GATE,   // gemma3n
    LLM_TENSOR_PER_LAYER_PROJ,       // gemma3n
    LLM_TENSOR_PER_LAYER_PROJ_NORM,  // gemma3n
    LLM_TENSOR_PER_LAYER_POST_NORM,  // gemma3n
    LLM_TENSOR_ALTUP_PROJ,           // gemma3n
    LLM_TENSOR_ALTUP_UNEMBD_PROJ,    // gemma3n
    LLM_TENSOR_ALTUP_CORRECT_COEF,   // gemma3n
    LLM_TENSOR_ALTUP_CORRECT_SCALE,  // gemma3n
    LLM_TENSOR_ALTUP_PREDICT_COEF,   // gemma3n
    LLM_TENSOR_ALTUP_ROUTER,         // gemma3n
    LLM_TENSOR_ALTUP_ROUTER_NORM,    // gemma3n
    LLM_TENSOR_LAUREL_L,             // gemma3n
    LLM_TENSOR_LAUREL_R,             // gemma3n
    LLM_TENSOR_LAUREL_POST_NORM,     // gemma3n
    LLM_TENSOR_SSM_IN,
    LLM_TENSOR_SSM_CONV1D,
    LLM_TENSOR_SSM_X,
    LLM_TENSOR_SSM_DT,
    LLM_TENSOR_SSM_DT_NORM,
    LLM_TENSOR_SSM_A,
    LLM_TENSOR_SSM_A_NOSCAN,        // qwen3next special case with MUL instead of SSM_SCAN
    LLM_TENSOR_SSM_B_NORM,
    LLM_TENSOR_SSM_C_NORM,
    LLM_TENSOR_SSM_D,
    LLM_TENSOR_SSM_NORM,
    LLM_TENSOR_SSM_OUT,
    LLM_TENSOR_SSM_BETA_ALPHA,      // qwen3next
    LLM_TENSOR_TIME_MIX_W0,
    LLM_TENSOR_TIME_MIX_W1,
    LLM_TENSOR_TIME_MIX_W2,
    LLM_TENSOR_TIME_MIX_A0,
    LLM_TENSOR_TIME_MIX_A1,
    LLM_TENSOR_TIME_MIX_A2,
    LLM_TENSOR_TIME_MIX_V0,
    LLM_TENSOR_TIME_MIX_V1,
    LLM_TENSOR_TIME_MIX_V2,
    LLM_TENSOR_TIME_MIX_G1,
    LLM_TENSOR_TIME_MIX_G2,
    LLM_TENSOR_TIME_MIX_K_K,
    LLM_TENSOR_TIME_MIX_K_A,
    LLM_TENSOR_TIME_MIX_R_K,
    LLM_TENSOR_TIME_MIX_LERP_X,
    LLM_TENSOR_TIME_MIX_LERP_W,
    LLM_TENSOR_TIME_MIX_LERP_K,
    LLM_TENSOR_TIME_MIX_LERP_V,
    LLM_TENSOR_TIME_MIX_LERP_R,
    LLM_TENSOR_TIME_MIX_LERP_G,
    LLM_TENSOR_TIME_MIX_LERP_FUSED,
    LLM_TENSOR_TIME_MIX_FIRST,
    LLM_TENSOR_TIME_MIX_DECAY,
    LLM_TENSOR_TIME_MIX_DECAY_W1,
    LLM_TENSOR_TIME_MIX_DECAY_W2,
    LLM_TENSOR_TIME_MIX_KEY,
    LLM_TENSOR_TIME_MIX_VALUE,
    LLM_TENSOR_TIME_MIX_RECEPTANCE,
    LLM_TENSOR_TIME_MIX_GATE,
    LLM_TENSOR_TIME_MIX_LN,
    LLM_TENSOR_TIME_MIX_OUTPUT,
    LLM_TENSOR_CHANNEL_MIX_LERP_K,
    LLM_TENSOR_CHANNEL_MIX_LERP_R,
    LLM_TENSOR_CHANNEL_MIX_KEY,
    LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
    LLM_TENSOR_CHANNEL_MIX_VALUE,
    LLM_TENSOR_ATTN_Q_A,
    LLM_TENSOR_ATTN_Q_B,
    LLM_TENSOR_ATTN_KV_A_MQA,
    LLM_TENSOR_ATTN_KV_B,
    LLM_TENSOR_ATTN_K_B,
    LLM_TENSOR_ATTN_V_B,
    LLM_TENSOR_ATTN_Q_A_NORM,
    LLM_TENSOR_ATTN_KV_A_NORM,
    LLM_TENSOR_ATTN_SUB_NORM,
    LLM_TENSOR_FFN_SUB_NORM,
    LLM_TENSOR_DEC_ATTN_NORM,
    LLM_TENSOR_DEC_ATTN_Q,
    LLM_TENSOR_DEC_ATTN_K,
    LLM_TENSOR_DEC_ATTN_V,
    LLM_TENSOR_DEC_ATTN_OUT,
    LLM_TENSOR_DEC_ATTN_REL_B,
    LLM_TENSOR_DEC_CROSS_ATTN_NORM,
    LLM_TENSOR_DEC_CROSS_ATTN_Q,
    LLM_TENSOR_DEC_CROSS_ATTN_K,
    LLM_TENSOR_DEC_CROSS_ATTN_V,
    LLM_TENSOR_DEC_CROSS_ATTN_OUT,
    LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
    LLM_TENSOR_DEC_FFN_NORM,
    LLM_TENSOR_DEC_FFN_GATE,
    LLM_TENSOR_DEC_FFN_DOWN,
    LLM_TENSOR_DEC_FFN_UP,
    LLM_TENSOR_DEC_OUTPUT_NORM,
    LLM_TENSOR_ENC_ATTN_NORM,
    LLM_TENSOR_ENC_ATTN_Q,
    LLM_TENSOR_ENC_ATTN_K,
    LLM_TENSOR_ENC_ATTN_V,
    LLM_TENSOR_ENC_ATTN_OUT,
    LLM_TENSOR_ENC_ATTN_REL_B,
    LLM_TENSOR_ENC_FFN_NORM,
    LLM_TENSOR_ENC_FFN_GATE,
    LLM_TENSOR_ENC_FFN_DOWN,
    LLM_TENSOR_ENC_FFN_UP,
    LLM_TENSOR_ENC_OUTPUT_NORM,
    LLM_TENSOR_CLS,
    LLM_TENSOR_CLS_OUT,
    LLM_TENSOR_BSKCN_TV,
    LLM_TENSOR_CONV1D,
    LLM_TENSOR_CONVNEXT_DW,
    LLM_TENSOR_CONVNEXT_NORM,
    LLM_TENSOR_CONVNEXT_PW1,
    LLM_TENSOR_CONVNEXT_PW2,
    LLM_TENSOR_CONVNEXT_GAMMA,
    LLM_TENSOR_POS_NET_CONV1,
    LLM_TENSOR_POS_NET_CONV2,
    LLM_TENSOR_POS_NET_NORM,
    LLM_TENSOR_POS_NET_NORM1,
    LLM_TENSOR_POS_NET_NORM2,
    LLM_TENSOR_POS_NET_ATTN_NORM,
    LLM_TENSOR_POS_NET_ATTN_Q,
    LLM_TENSOR_POS_NET_ATTN_K,
    LLM_TENSOR_POS_NET_ATTN_V,
    LLM_TENSOR_POS_NET_ATTN_OUT,
    LLM_TENSOR_SHORTCONV_CONV,
    LLM_TENSOR_SHORTCONV_INPROJ,
    LLM_TENSOR_SHORTCONV_OUTPROJ,
    LLM_TENSOR_VISEXP_ATTN_QKV,
    LLM_TENSOR_VISEXP_ATTN_OUT,
    LLM_TENSOR_VISEXP_FFN_GATE,
    LLM_TENSOR_VISEXP_FFN_DOWN,
    LLM_TENSOR_VISEXP_FFN_UP,
    LLM_TENSOR_NEXTN_EH_PROJ,
    LLM_TENSOR_NEXTN_EMBED_TOKENS,
    LLM_TENSOR_NEXTN_ENORM,
    LLM_TENSOR_NEXTN_HNORM,
    LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
    LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
};

// Coarse position of a tensor within the model graph.
enum llm_tensor_layer {
    LLM_TENSOR_LAYER_INPUT,     // input-side tensors (before the repeating blocks)
    LLM_TENSOR_LAYER_REPEATING, // tensors that repeat once per layer/block
    LLM_TENSOR_LAYER_OUTPUT,    // output-side tensors (after the repeating blocks)
};

struct LLM_KV {
510
    LLM_KV(llm_arch arch, const char * suffix = nullptr);
511
512

    llm_arch arch;
513
    const char * suffix;
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533

    std::string operator()(llm_kv kv) const;
};

// helper to handle gguf constants
// usage:
//
//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
//
//   std::string name = tn(LLM_TENSOR_OUTPUT);                     -> "output"
//   std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias");         -> "token_embd.bias"
//   std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3);     -> "blk.3.attn_norm.weight"
//
struct LLM_TN_IMPL {
    const llm_arch arch;
    const llm_tensor tensor;
    const char * const suffix;
    const int bid;
    const int xid;

534
535
536
537
    const std::set<llm_tensor> model_tensors;

    LLM_TN_IMPL(llm_arch arch, llm_tensor tensor, const char * suffix, int bid, int xid);

538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
    std::string str() const;

    operator std::string() const {
        return str();
    }

    friend bool operator==(const std::string & str, const LLM_TN_IMPL & tn) {
        return str == tn.str();
    }

    friend bool operator!=(const std::string & str, const LLM_TN_IMPL & tn) {
        return str != tn.str();
    }
};

// Tensor-name factory bound to one architecture (see usage example above).
struct LLM_TN {
    LLM_TN(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    // name with an explicit suffix, e.g. tn(LLM_TENSOR_ATTN_NORM, "weight", 3)
    // -> "blk.3.attn_norm.weight"; bid/xid of -1 mean "no index"
    LLM_TN_IMPL operator()(llm_tensor tensor, const char * suffix, int bid = -1, int xid = -1) const {
        return LLM_TN_IMPL(arch, tensor, suffix, bid, xid);
    }

    // name without a suffix, e.g. tn(LLM_TENSOR_OUTPUT) -> "output"
    LLM_TN_IMPL operator()(llm_tensor tensor, int bid = -1, int xid = -1) const {
        return LLM_TN_IMPL(arch, tensor, nullptr, bid, xid);
    }
};


// Static metadata about a tensor id: where it lives in the model
// (input / repeating / output) and the ggml operation associated with it.
struct llm_tensor_info {
    llm_tensor_layer layer;
    ggml_op op;
};

// name string for an architecture enum value
const char * llm_arch_name(llm_arch arch);

// parse an architecture name; see LLM_ARCH_UNKNOWN for unrecognized input
llm_arch llm_arch_from_string(const std::string & name);

// look up the static layer/op metadata for a tensor id
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);

// architecture capability queries
bool llm_arch_is_recurrent(const llm_arch & arch);
bool llm_arch_is_hybrid   (const llm_arch & arch);
bool llm_arch_is_diffusion(const llm_arch & arch);