#pragma once

#include "ggml.h" // ggml_op

#include <string>

//
// gguf constants (sync with gguf.py)
//

// model architectures supported by the loader;
// enumerators are implicitly numbered in declaration order
enum llm_arch {
    LLM_ARCH_CLIP,
    LLM_ARCH_LLAMA,
    LLM_ARCH_LLAMA4,
    LLM_ARCH_DECI,
    LLM_ARCH_FALCON,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_GROK,
    LLM_ARCH_GPT2,
    LLM_ARCH_GPTJ,
    LLM_ARCH_GPTNEOX,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
    LLM_ARCH_REFACT,
    LLM_ARCH_BERT,
    LLM_ARCH_NOMIC_BERT,
    LLM_ARCH_NOMIC_BERT_MOE,
    LLM_ARCH_NEO_BERT,
    LLM_ARCH_JINA_BERT_V2,
    LLM_ARCH_JINA_BERT_V3,
    LLM_ARCH_BLOOM,
    LLM_ARCH_STABLELM,
    LLM_ARCH_QWEN,
    LLM_ARCH_QWEN2,
    LLM_ARCH_QWEN2MOE,
    LLM_ARCH_QWEN2VL,
    LLM_ARCH_QWEN3,
    LLM_ARCH_QWEN3MOE,
    LLM_ARCH_QWEN3NEXT,
    LLM_ARCH_QWEN3VL,
    LLM_ARCH_QWEN3VLMOE,
    LLM_ARCH_PHI2,
    LLM_ARCH_PHI3,
    LLM_ARCH_PHIMOE,
    LLM_ARCH_PLAMO,
    LLM_ARCH_PLAMO2,
    LLM_ARCH_CODESHELL,
    LLM_ARCH_ORION,
    LLM_ARCH_INTERNLM2,
    LLM_ARCH_MINICPM,
    LLM_ARCH_MINICPM3,
    LLM_ARCH_GEMMA,
    LLM_ARCH_GEMMA2,
    LLM_ARCH_GEMMA3,
    LLM_ARCH_GEMMA3N,
    LLM_ARCH_GEMMA_EMBEDDING,
    LLM_ARCH_STARCODER2,
    LLM_ARCH_MAMBA,
    LLM_ARCH_MAMBA2,
    LLM_ARCH_JAMBA,
    LLM_ARCH_FALCON_H1,
    LLM_ARCH_XVERSE,
    LLM_ARCH_COMMAND_R,
    LLM_ARCH_COHERE2,
    LLM_ARCH_DBRX,
    LLM_ARCH_OLMO,
    LLM_ARCH_OLMO2,
    LLM_ARCH_OLMOE,
    LLM_ARCH_OPENELM,
    LLM_ARCH_ARCTIC,
    LLM_ARCH_DEEPSEEK,
    LLM_ARCH_DEEPSEEK2,
    LLM_ARCH_CHATGLM,
    LLM_ARCH_GLM4,
    LLM_ARCH_GLM4_MOE,
    LLM_ARCH_BITNET,
    LLM_ARCH_T5,
    LLM_ARCH_T5ENCODER,
    LLM_ARCH_JAIS,
    LLM_ARCH_NEMOTRON,
    LLM_ARCH_NEMOTRON_H,
    LLM_ARCH_EXAONE,
    LLM_ARCH_EXAONE4,
    LLM_ARCH_RWKV6,
    LLM_ARCH_RWKV6QWEN2,
    LLM_ARCH_RWKV7,
    LLM_ARCH_ARWKV7,
    LLM_ARCH_GRANITE,
    LLM_ARCH_GRANITE_MOE,
    LLM_ARCH_GRANITE_HYBRID,
    LLM_ARCH_CHAMELEON,
    LLM_ARCH_SOLAR,
    LLM_ARCH_WAVTOKENIZER_DEC,
    LLM_ARCH_PLM,
    LLM_ARCH_BAILINGMOE,
    LLM_ARCH_BAILINGMOE2,
    LLM_ARCH_DOTS1,
    LLM_ARCH_ARCEE,
    LLM_ARCH_AFMOE,
    LLM_ARCH_ERNIE4_5,
    LLM_ARCH_ERNIE4_5_MOE,
    LLM_ARCH_HUNYUAN_MOE,
    LLM_ARCH_HUNYUAN_DENSE,
    LLM_ARCH_SMOLLM3,
    LLM_ARCH_OPENAI_MOE,
    LLM_ARCH_LFM2,
    LLM_ARCH_LFM2MOE,
    LLM_ARCH_DREAM,
    LLM_ARCH_SMALLTHINKER,
    LLM_ARCH_LLADA,
    LLM_ARCH_LLADA_MOE,
    LLM_ARCH_SEED_OSS,
    LLM_ARCH_GROVEMOE,
    LLM_ARCH_APERTUS,
    LLM_ARCH_MINIMAX_M2,
    LLM_ARCH_COGVLM,
    LLM_ARCH_RND1,
    LLM_ARCH_PANGU_EMBED,
    LLM_ARCH_MISTRAL3,
    LLM_ARCH_UNKNOWN, // sentinel for unrecognized architecture strings
};

// gguf metadata key identifiers; mapped to architecture-prefixed key
// strings by LLM_KV::operator()
enum llm_kv {
    LLM_KV_GENERAL_TYPE,
    LLM_KV_GENERAL_ARCHITECTURE,
    LLM_KV_GENERAL_QUANTIZATION_VERSION,
    LLM_KV_GENERAL_ALIGNMENT,
    LLM_KV_GENERAL_FILE_TYPE,
    LLM_KV_GENERAL_SAMPLING_SEQUENCE,
    LLM_KV_GENERAL_SAMPLING_TOP_K,
    LLM_KV_GENERAL_SAMPLING_TOP_P,
    LLM_KV_GENERAL_SAMPLING_MIN_P,
    LLM_KV_GENERAL_SAMPLING_XTC_PROBABILITY,
    LLM_KV_GENERAL_SAMPLING_XTC_THRESHOLD,
    LLM_KV_GENERAL_SAMPLING_TEMP,
    LLM_KV_GENERAL_SAMPLING_PENALTY_LAST_N,
    LLM_KV_GENERAL_SAMPLING_PENALTY_REPEAT,
    LLM_KV_GENERAL_SAMPLING_MIROSTAT,
    LLM_KV_GENERAL_SAMPLING_MIROSTAT_TAU,
    LLM_KV_GENERAL_SAMPLING_MIROSTAT_ETA,
    LLM_KV_GENERAL_NAME,
    LLM_KV_GENERAL_AUTHOR,
    LLM_KV_GENERAL_VERSION,
    LLM_KV_GENERAL_URL,
    LLM_KV_GENERAL_DESCRIPTION,
    LLM_KV_GENERAL_LICENSE,
    LLM_KV_GENERAL_SOURCE_URL,
    LLM_KV_GENERAL_SOURCE_HF_REPO,

    LLM_KV_VOCAB_SIZE,
    LLM_KV_CONTEXT_LENGTH,
    LLM_KV_EMBEDDING_LENGTH,
    LLM_KV_FEATURES_LENGTH,
    LLM_KV_BLOCK_COUNT,
    LLM_KV_LEADING_DENSE_BLOCK_COUNT,
    LLM_KV_FEED_FORWARD_LENGTH,
    LLM_KV_EXPERT_FEED_FORWARD_LENGTH,
    LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH,
    LLM_KV_EXPERT_CHUNK_FEED_FORWARD_LENGTH,
    LLM_KV_USE_PARALLEL_RESIDUAL,
    LLM_KV_TENSOR_DATA_LAYOUT,
    LLM_KV_EXPERT_COUNT,
    LLM_KV_EXPERT_USED_COUNT,
    LLM_KV_EXPERT_SHARED_COUNT,
    LLM_KV_EXPERT_GROUP_COUNT,
    LLM_KV_EXPERT_GROUP_USED_COUNT,
    LLM_KV_EXPERT_WEIGHTS_SCALE,
    LLM_KV_EXPERT_WEIGHTS_NORM,
    LLM_KV_EXPERT_GATING_FUNC,
    LLM_KV_EXPERT_GROUP_SCALE,
    LLM_KV_EXPERTS_PER_GROUP,
    LLM_KV_MOE_EVERY_N_LAYERS,
    LLM_KV_NEXTN_PREDICT_LAYERS,
    LLM_KV_NUM_DEEPSTACK_LAYERS,
    LLM_KV_POOLING_TYPE,
    LLM_KV_LOGIT_SCALE,
    LLM_KV_DECODER_START_TOKEN_ID,
    LLM_KV_DECODER_BLOCK_COUNT,
    LLM_KV_ATTN_LOGIT_SOFTCAPPING,
    LLM_KV_ROUTER_LOGIT_SOFTCAPPING,
    LLM_KV_FINAL_LOGIT_SOFTCAPPING,
    LLM_KV_SWIN_NORM,
    LLM_KV_RESCALE_EVERY_N_LAYERS,
    LLM_KV_TIME_MIX_EXTRA_DIM,
    LLM_KV_TIME_DECAY_EXTRA_DIM,
    LLM_KV_RESIDUAL_SCALE,
    LLM_KV_EMBEDDING_SCALE,
    LLM_KV_TOKEN_SHIFT_COUNT,
    LLM_KV_INTERLEAVE_MOE_LAYER_STEP,

    LLM_KV_ATTENTION_HEAD_COUNT,
    LLM_KV_ATTENTION_HEAD_COUNT_KV,
    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
    LLM_KV_ATTENTION_CLAMP_KQV,
    LLM_KV_ATTENTION_KEY_LENGTH,
    LLM_KV_ATTENTION_VALUE_LENGTH,
    LLM_KV_ATTENTION_LAYERNORM_EPS,
    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
    LLM_KV_ATTENTION_GROUPNORM_EPS,
    LLM_KV_ATTENTION_GROUPNORM_GROUPS,
    LLM_KV_ATTENTION_CAUSAL,
    LLM_KV_ATTENTION_Q_LORA_RANK,
    LLM_KV_ATTENTION_KV_LORA_RANK,
    LLM_KV_ATTENTION_DECAY_LORA_RANK,
    LLM_KV_ATTENTION_ICLR_LORA_RANK,
    LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK,
    LLM_KV_ATTENTION_GATE_LORA_RANK,
    LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
    LLM_KV_ATTENTION_SLIDING_WINDOW,
    LLM_KV_ATTENTION_SCALE,
    LLM_KV_ATTENTION_OUTPUT_SCALE,
    LLM_KV_ATTENTION_TEMPERATURE_LENGTH,
    LLM_KV_ATTENTION_TEMPERATURE_SCALE,
    LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
    LLM_KV_ATTENTION_KEY_LENGTH_MLA,
    LLM_KV_ATTENTION_VALUE_LENGTH_MLA,

    LLM_KV_ROPE_DIMENSION_COUNT,
    LLM_KV_ROPE_DIMENSION_SECTIONS,
    LLM_KV_ROPE_FREQ_BASE,
    LLM_KV_ROPE_SCALE_LINEAR,
    LLM_KV_ROPE_SCALING_TYPE,
    LLM_KV_ROPE_SCALING_FACTOR,
    LLM_KV_ROPE_SCALING_ATTN_FACTOR,
    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
    LLM_KV_ROPE_SCALING_FINETUNED,
    LLM_KV_ROPE_SCALING_YARN_LOG_MUL,
    LLM_KV_ROPE_SCALING_YARN_EXT_FACTOR,
    LLM_KV_ROPE_SCALING_YARN_ATTN_FACTOR,
    LLM_KV_ROPE_SCALING_YARN_BETA_FAST,
    LLM_KV_ROPE_SCALING_YARN_BETA_SLOW,

    LLM_KV_SPLIT_NO,
    LLM_KV_SPLIT_COUNT,
    LLM_KV_SPLIT_TENSORS_COUNT,

    LLM_KV_SSM_INNER_SIZE,
    LLM_KV_SSM_CONV_KERNEL,
    LLM_KV_SSM_STATE_SIZE,
    LLM_KV_SSM_TIME_STEP_RANK,
    LLM_KV_SSM_GROUP_COUNT,
    LLM_KV_SSM_DT_B_C_RMS,

    LLM_KV_WKV_HEAD_SIZE,

    LLM_KV_TOKENIZER_MODEL,
    LLM_KV_TOKENIZER_PRE,
    LLM_KV_TOKENIZER_LIST,
    LLM_KV_TOKENIZER_TOKEN_TYPE,
    LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
    LLM_KV_TOKENIZER_SCORES,
    LLM_KV_TOKENIZER_MERGES,
    LLM_KV_TOKENIZER_BOS_ID,
    LLM_KV_TOKENIZER_EOS_ID,
    LLM_KV_TOKENIZER_EOT_ID,
    LLM_KV_TOKENIZER_EOM_ID,
    LLM_KV_TOKENIZER_UNK_ID,
    LLM_KV_TOKENIZER_SEP_ID,
    LLM_KV_TOKENIZER_PAD_ID,
    LLM_KV_TOKENIZER_CLS_ID,
    LLM_KV_TOKENIZER_MASK_ID,
    LLM_KV_TOKENIZER_ADD_BOS,
    LLM_KV_TOKENIZER_ADD_EOS,
    LLM_KV_TOKENIZER_ADD_SEP,
    LLM_KV_TOKENIZER_ADD_PREFIX,
    LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
    LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
    LLM_KV_TOKENIZER_HF_JSON,
    LLM_KV_TOKENIZER_RWKV,
    LLM_KV_TOKENIZER_CHAT_TEMPLATE,
    LLM_KV_TOKENIZER_FIM_PRE_ID,
    LLM_KV_TOKENIZER_FIM_SUF_ID,
    LLM_KV_TOKENIZER_FIM_MID_ID,
    LLM_KV_TOKENIZER_FIM_PAD_ID,
    LLM_KV_TOKENIZER_FIM_REP_ID,
    LLM_KV_TOKENIZER_FIM_SEP_ID,

    LLM_KV_ADAPTER_TYPE,
    LLM_KV_ADAPTER_LORA_ALPHA,
    LLM_KV_ADAPTER_LORA_TASK_NAME,
    LLM_KV_ADAPTER_LORA_PROMPT_PREFIX,
    LLM_KV_ADAPTER_ALORA_INVOCATION_TOKENS,

    LLM_KV_POSNET_EMBEDDING_LENGTH,
    LLM_KV_POSNET_BLOCK_COUNT,

    LLM_KV_CONVNEXT_EMBEDDING_LENGTH,
    LLM_KV_CONVNEXT_BLOCK_COUNT,

    LLM_KV_CLASSIFIER_OUTPUT_LABELS,

    LLM_KV_SHORTCONV_L_CACHE,

    LLM_KV_XIELU_ALPHA_N,
    LLM_KV_XIELU_ALPHA_P,
    LLM_KV_XIELU_BETA,
    LLM_KV_XIELU_EPS,

    // deprecated:
    LLM_KV_TOKENIZER_PREFIX_ID,
    LLM_KV_TOKENIZER_SUFFIX_ID,
    LLM_KV_TOKENIZER_MIDDLE_ID,

    // sentence-transformers dense layers in and out features
    LLM_KV_DENSE_2_FEAT_IN,
    LLM_KV_DENSE_2_FEAT_OUT,
    LLM_KV_DENSE_3_FEAT_IN,
    LLM_KV_DENSE_3_FEAT_OUT,
};

// tensor type identifiers; mapped to per-architecture tensor name strings
// by LLM_TN / LLM_TN_IMPL
enum llm_tensor {
    LLM_TENSOR_TOKEN_EMBD,
    LLM_TENSOR_TOKEN_EMBD_NORM,
    LLM_TENSOR_TOKEN_TYPES,
    LLM_TENSOR_POS_EMBD,
    LLM_TENSOR_DENSE_2_OUT,
    LLM_TENSOR_DENSE_3_OUT,
    LLM_TENSOR_OUTPUT,
    LLM_TENSOR_OUTPUT_NORM,
    LLM_TENSOR_ROPE_FREQS,
    LLM_TENSOR_ROPE_FACTORS_LONG,
    LLM_TENSOR_ROPE_FACTORS_SHORT,
    LLM_TENSOR_ATTN_Q,
    LLM_TENSOR_ATTN_K,
    LLM_TENSOR_ATTN_V,
    LLM_TENSOR_ATTN_QKV,
    LLM_TENSOR_ATTN_OUT,
    LLM_TENSOR_ATTN_NORM,
    LLM_TENSOR_ATTN_NORM_2,
    LLM_TENSOR_ATTN_OUT_NORM,
    LLM_TENSOR_ATTN_POST_NORM,
    LLM_TENSOR_ATTN_ROT_EMBD,
    LLM_TENSOR_ATTN_SINKS,
    LLM_TENSOR_ATTN_GATE,
    LLM_TENSOR_FFN_GATE_INP,
    LLM_TENSOR_FFN_GATE_INP_SHEXP,
    LLM_TENSOR_FFN_NORM,
    LLM_TENSOR_FFN_POST_NORM,
    LLM_TENSOR_FFN_GATE,
    LLM_TENSOR_FFN_DOWN,
    LLM_TENSOR_FFN_UP,
    LLM_TENSOR_FFN_ACT,
    LLM_TENSOR_FFN_DOWN_EXP,  // split experts for backward compatibility
    LLM_TENSOR_FFN_GATE_EXP,
    LLM_TENSOR_FFN_UP_EXP,
    LLM_TENSOR_FFN_NORM_EXPS,
    LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
    LLM_TENSOR_FFN_GATE_EXPS,
    LLM_TENSOR_FFN_UP_EXPS,
    LLM_TENSOR_FFN_DOWN_SHEXP,
    LLM_TENSOR_FFN_GATE_SHEXP,
    LLM_TENSOR_FFN_UP_SHEXP,
    LLM_TENSOR_FFN_DOWN_CHEXPS,
    LLM_TENSOR_FFN_GATE_CHEXPS,
    LLM_TENSOR_FFN_UP_CHEXPS,
    LLM_TENSOR_FFN_EXP_PROBS_B,
    LLM_TENSOR_ATTN_Q_NORM,
    LLM_TENSOR_ATTN_K_NORM,
    LLM_TENSOR_LAYER_OUT_NORM,
    LLM_TENSOR_POST_ATTN_NORM,
    LLM_TENSOR_POST_MLP_NORM,
    LLM_TENSOR_PER_LAYER_TOKEN_EMBD, // gemma3n
    LLM_TENSOR_PER_LAYER_MODEL_PROJ, // gemma3n
    LLM_TENSOR_PER_LAYER_INP_GATE,   // gemma3n
    LLM_TENSOR_PER_LAYER_PROJ,       // gemma3n
    LLM_TENSOR_PER_LAYER_PROJ_NORM,  // gemma3n
    LLM_TENSOR_PER_LAYER_POST_NORM,  // gemma3n
    LLM_TENSOR_ALTUP_PROJ,           // gemma3n
    LLM_TENSOR_ALTUP_UNEMBD_PROJ,    // gemma3n
    LLM_TENSOR_ALTUP_CORRECT_COEF,   // gemma3n
    LLM_TENSOR_ALTUP_CORRECT_SCALE,  // gemma3n
    LLM_TENSOR_ALTUP_PREDICT_COEF,   // gemma3n
    LLM_TENSOR_ALTUP_ROUTER,         // gemma3n
    LLM_TENSOR_ALTUP_ROUTER_NORM,    // gemma3n
    LLM_TENSOR_LAUREL_L,             // gemma3n
    LLM_TENSOR_LAUREL_R,             // gemma3n
    LLM_TENSOR_LAUREL_POST_NORM,     // gemma3n
    LLM_TENSOR_SSM_IN,
    LLM_TENSOR_SSM_CONV1D,
    LLM_TENSOR_SSM_X,
    LLM_TENSOR_SSM_DT,
    LLM_TENSOR_SSM_DT_NORM,
    LLM_TENSOR_SSM_A,
    LLM_TENSOR_SSM_A_NOSCAN,        // qwen3next special case with MUL instead of SSM_SCAN
    LLM_TENSOR_SSM_B_NORM,
    LLM_TENSOR_SSM_C_NORM,
    LLM_TENSOR_SSM_D,
    LLM_TENSOR_SSM_NORM,
    LLM_TENSOR_SSM_OUT,
    LLM_TENSOR_SSM_BETA_ALPHA,      // qwen3next
    LLM_TENSOR_TIME_MIX_W0,
    LLM_TENSOR_TIME_MIX_W1,
    LLM_TENSOR_TIME_MIX_W2,
    LLM_TENSOR_TIME_MIX_A0,
    LLM_TENSOR_TIME_MIX_A1,
    LLM_TENSOR_TIME_MIX_A2,
    LLM_TENSOR_TIME_MIX_V0,
    LLM_TENSOR_TIME_MIX_V1,
    LLM_TENSOR_TIME_MIX_V2,
    LLM_TENSOR_TIME_MIX_G1,
    LLM_TENSOR_TIME_MIX_G2,
    LLM_TENSOR_TIME_MIX_K_K,
    LLM_TENSOR_TIME_MIX_K_A,
    LLM_TENSOR_TIME_MIX_R_K,
    LLM_TENSOR_TIME_MIX_LERP_X,
    LLM_TENSOR_TIME_MIX_LERP_W,
    LLM_TENSOR_TIME_MIX_LERP_K,
    LLM_TENSOR_TIME_MIX_LERP_V,
    LLM_TENSOR_TIME_MIX_LERP_R,
    LLM_TENSOR_TIME_MIX_LERP_G,
    LLM_TENSOR_TIME_MIX_LERP_FUSED,
    LLM_TENSOR_TIME_MIX_FIRST,
    LLM_TENSOR_TIME_MIX_DECAY,
    LLM_TENSOR_TIME_MIX_DECAY_W1,
    LLM_TENSOR_TIME_MIX_DECAY_W2,
    LLM_TENSOR_TIME_MIX_KEY,
    LLM_TENSOR_TIME_MIX_VALUE,
    LLM_TENSOR_TIME_MIX_RECEPTANCE,
    LLM_TENSOR_TIME_MIX_GATE,
    LLM_TENSOR_TIME_MIX_LN,
    LLM_TENSOR_TIME_MIX_OUTPUT,
    LLM_TENSOR_CHANNEL_MIX_LERP_K,
    LLM_TENSOR_CHANNEL_MIX_LERP_R,
    LLM_TENSOR_CHANNEL_MIX_KEY,
    LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
    LLM_TENSOR_CHANNEL_MIX_VALUE,
    LLM_TENSOR_ATTN_Q_A,
    LLM_TENSOR_ATTN_Q_B,
    LLM_TENSOR_ATTN_KV_A_MQA,
    LLM_TENSOR_ATTN_KV_B,
    LLM_TENSOR_ATTN_K_B,
    LLM_TENSOR_ATTN_V_B,
    LLM_TENSOR_ATTN_Q_A_NORM,
    LLM_TENSOR_ATTN_KV_A_NORM,
    LLM_TENSOR_ATTN_SUB_NORM,
    LLM_TENSOR_FFN_SUB_NORM,
    LLM_TENSOR_DEC_ATTN_NORM,
    LLM_TENSOR_DEC_ATTN_Q,
    LLM_TENSOR_DEC_ATTN_K,
    LLM_TENSOR_DEC_ATTN_V,
    LLM_TENSOR_DEC_ATTN_OUT,
    LLM_TENSOR_DEC_ATTN_REL_B,
    LLM_TENSOR_DEC_CROSS_ATTN_NORM,
    LLM_TENSOR_DEC_CROSS_ATTN_Q,
    LLM_TENSOR_DEC_CROSS_ATTN_K,
    LLM_TENSOR_DEC_CROSS_ATTN_V,
    LLM_TENSOR_DEC_CROSS_ATTN_OUT,
    LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
    LLM_TENSOR_DEC_FFN_NORM,
    LLM_TENSOR_DEC_FFN_GATE,
    LLM_TENSOR_DEC_FFN_DOWN,
    LLM_TENSOR_DEC_FFN_UP,
    LLM_TENSOR_DEC_OUTPUT_NORM,
    LLM_TENSOR_ENC_ATTN_NORM,
    LLM_TENSOR_ENC_ATTN_Q,
    LLM_TENSOR_ENC_ATTN_K,
    LLM_TENSOR_ENC_ATTN_V,
    LLM_TENSOR_ENC_ATTN_OUT,
    LLM_TENSOR_ENC_ATTN_REL_B,
    LLM_TENSOR_ENC_FFN_NORM,
    LLM_TENSOR_ENC_FFN_GATE,
    LLM_TENSOR_ENC_FFN_DOWN,
    LLM_TENSOR_ENC_FFN_UP,
    LLM_TENSOR_ENC_OUTPUT_NORM,
    LLM_TENSOR_CLS,
    LLM_TENSOR_CLS_OUT,
    LLM_TENSOR_BSKCN_TV,
    LLM_TENSOR_CONV1D,
    LLM_TENSOR_CONVNEXT_DW,
    LLM_TENSOR_CONVNEXT_NORM,
    LLM_TENSOR_CONVNEXT_PW1,
    LLM_TENSOR_CONVNEXT_PW2,
    LLM_TENSOR_CONVNEXT_GAMMA,
    LLM_TENSOR_POS_NET_CONV1,
    LLM_TENSOR_POS_NET_CONV2,
    LLM_TENSOR_POS_NET_NORM,
    LLM_TENSOR_POS_NET_NORM1,
    LLM_TENSOR_POS_NET_NORM2,
    LLM_TENSOR_POS_NET_ATTN_NORM,
    LLM_TENSOR_POS_NET_ATTN_Q,
    LLM_TENSOR_POS_NET_ATTN_K,
    LLM_TENSOR_POS_NET_ATTN_V,
    LLM_TENSOR_POS_NET_ATTN_OUT,
    LLM_TENSOR_SHORTCONV_CONV,
    LLM_TENSOR_SHORTCONV_INPROJ,
    LLM_TENSOR_SHORTCONV_OUTPROJ,
    LLM_TENSOR_VISEXP_ATTN_QKV,
    LLM_TENSOR_VISEXP_ATTN_OUT,
    LLM_TENSOR_VISEXP_FFN_GATE,
    LLM_TENSOR_VISEXP_FFN_DOWN,
    LLM_TENSOR_VISEXP_FFN_UP,
    LLM_TENSOR_NEXTN_EH_PROJ,
    LLM_TENSOR_NEXTN_EMBED_TOKENS,
    LLM_TENSOR_NEXTN_ENORM,
    LLM_TENSOR_NEXTN_HNORM,
    LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
    LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
};

// coarse position of a tensor within the model graph: the input embedding
// section, the repeating per-layer blocks, or the output section
enum llm_tensor_layer {
    LLM_TENSOR_LAYER_INPUT,
    LLM_TENSOR_LAYER_REPEATING,
    LLM_TENSOR_LAYER_OUTPUT,
};

struct LLM_KV {
507
    LLM_KV(llm_arch arch, const char * suffix = nullptr);
508
509

    llm_arch arch;
510
    const char * suffix;
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570

    std::string operator()(llm_kv kv) const;
};

// helper to handle gguf constants
// usage:
//
//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
//
//   std::string name = tn(LLM_TENSOR_OUTPUT);                     -> "output"
//   std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias");         -> "token_embd.bias"
//   std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3);     -> "blk.3.attn_norm.weight"
//
// lazily-formatted tensor name: holds the pieces and renders the final
// string via str() (defined out of line); implicitly convertible to
// std::string so it can be used wherever a name string is expected
struct LLM_TN_IMPL {
    const llm_arch arch;
    const llm_tensor tensor;
    const char * const suffix; // e.g. "weight"/"bias"; nullptr for no suffix
    const int bid; // block (layer) index, e.g. the 3 in "blk.3.attn_norm.weight"; -1 when absent
    const int xid; // extra index; -1 when absent — presumably per-expert, TODO confirm

    std::string str() const;

    operator std::string() const {
        return str();
    }

    // comparisons against plain strings format the name on each call
    friend bool operator==(const std::string & str, const LLM_TN_IMPL & tn) {
        return str == tn.str();
    }

    friend bool operator!=(const std::string & str, const LLM_TN_IMPL & tn) {
        return str != tn.str();
    }
};

// factory bound to one architecture; produces LLM_TN_IMPL name builders
// (see the usage examples in the comment above)
struct LLM_TN {
    LLM_TN(llm_arch arch) : arch(arch) {}

    llm_arch arch;

    // tensor name with an explicit suffix and optional block/extra indices
    LLM_TN_IMPL operator()(llm_tensor tensor, const char * suffix, int bid = -1, int xid = -1) const {
        return { arch, tensor, suffix, bid, xid };
    }

    // tensor name without a suffix
    LLM_TN_IMPL operator()(llm_tensor tensor, int bid = -1, int xid = -1) const {
        return { arch, tensor, nullptr, bid, xid };
    }
};


// static metadata for a tensor type (looked up via llm_tensor_info_for)
struct llm_tensor_info {
    llm_tensor_layer layer; // which model section the tensor belongs to
    ggml_op op;             // associated ggml operation
};

// arch <-> string conversions (definitions out of line)
const char * llm_arch_name(llm_arch arch);

llm_arch llm_arch_from_string(const std::string & name);

// static metadata lookup for a tensor type
const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor);

// architecture capability queries
bool llm_arch_is_recurrent(const llm_arch & arch);
bool llm_arch_is_hybrid   (const llm_arch & arch);
bool llm_arch_is_diffusion(const llm_arch & arch);