# DeepSeek-V3-Chat.yaml
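# Optimization rule file for DeepSeek-V3 chat models: each entry matches modules
# by name (regex) and/or class, then injects the listed ktransformers operator;
# kwargs select the device and kernel used for prefill and for generation.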
- match:
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3RotaryEmbedding
  replace:
    class: ktransformers.operators.RoPE.YarnRotaryEmbeddingV3
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"

- match:
    name: "^lm_head$"  # regular expression 
    class: torch.nn.Linear  # only match modules matching name and class simultaneously
  replace:
    class: ktransformers.operators.linear.KTransformersLinear  # optimized Kernel on quantized data types
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
      generate_op: "KLinearMarlin"
      prefill_op: "KLinearTorch"

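# Replace every Linear inside the decoder layers except self_attn.kv_b_proj,
# which is presumably excluded so the MLA attention operator can use its original weights.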
- match:
    name: "^model\\.layers\\.(?!.*self_attn\\.kv_b_proj).*$"  # regular expression 
    class: torch.nn.Linear  # only match modules matching name and class simultaneously
  replace:
    class: ktransformers.operators.linear.KTransformersLinear  # optimized Kernel on quantized data types
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
      generate_op: "KLinearMarlin"
      prefill_op: "KLinearTorch"
- match:
    name: "^model\\.layers\\..*\\.mlp$"
    class: ktransformers.models.modeling_deepseek_v3.DeepseekV3MoE
  replace:
    class: ktransformers.operators.experts.KDeepseekV3MoE     # mlp module with custom forward function
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
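# Routing gate of the MoE layers; kept on the GPU for both prefill and generation.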
- match:
    class: ktransformers.models.modeling_deepseek_v3.MoEGate
  replace:
    class: ktransformers.operators.gate.KMoEGate
    kwargs:
      generate_device: "cuda:0"
      prefill_device: "cuda:0"
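# Expert weights: prefill runs on the GPU, generation is offloaded to the CPU kernel,
# with results copied back to the GPU (out_device). If VRAM allows, a sketch of a
# GPU-only alternative (assuming the KExpertsMarlin backend is available in your build):
#       generate_device: "cuda"
#       generate_op: "KExpertsMarlin"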
- match:
    name: "^model\\.layers\\..*\\.mlp\\.experts$"
  replace:
    class: ktransformers.operators.experts.KTransformersExperts     # custom MoE kernel with expert parallelism
    kwargs:
      prefill_device: "cuda"
      prefill_op: "KExpertsTorch"
      generate_device: "cpu"
      generate_op: "KExpertsCPU"
      out_device: "cuda"
  recursive: False # don't recursively inject submodules of this module
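# Attention: DeepSeek-V3 reuses the optimized V2-style MLA attention operator.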
- match:
    name: "^model\\.layers\\..*\\.self_attn$"
  replace:
    class: ktransformers.operators.attention.KDeepseekV2Attention # optimized MLA implementation
    kwargs:
      generate_device: "cuda"
      prefill_device: "cuda"
      absorb_for_prefill: False # set to True to enable long-context prefill (prefill may be slower)
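# Model wrapper; a positive per_layer_prefill_intput_threshold appears to enable
# layer-wise prefill for prompts longer than that many tokens (to reduce peak VRAM).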
- match:
    name: "^model$"
  replace:
    class: "ktransformers.operators.models.KDeepseekV2Model"
    kwargs:
      per_layer_prefill_intput_threshold: 0 # 0 disables layer-wise prefill
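# Keep the embedding lookup on the CPU to save GPU memory.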
- match:
    name: "^model.embed_tokens"
  replace:
    class: "default"
    kwargs:
      generate_device: "cpu"
      prefill_device: "cpu"