convert_gemma.go
package convert

import (
	"strings"

	"github.com/pdevine/tensor"
	"github.com/pdevine/tensor/native"

	"github.com/ollama/ollama/llm"
)

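// gemma holds the model hyperparameters parsed from the Hugging Face
// config.json, alongside the shared converter Parameters.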
type gemma struct {
	Parameters
	MaxPositionEmbeddings uint32  `json:"max_position_embeddings"`
	HiddenSize            uint32  `json:"hidden_size"`
	HiddenLayers          uint32  `json:"num_hidden_layers"`
	IntermediateSize      uint32  `json:"intermediate_size"`
	NumAttentionHeads     uint32  `json:"num_attention_heads"`
	NumKeyValueHeads      uint32  `json:"num_key_value_heads"`
	RMSNormEPS            float32 `json:"rms_norm_eps"`
	HeadDim               uint32  `json:"head_dim"`
}

var _ Converter = (*gemma)(nil)

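// KV maps the parsed configuration onto GGUF metadata keys for the
// gemma architecture.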
func (p *gemma) KV(t *Tokenizer) llm.KV {
	kv := p.Parameters.KV(t)
	kv["general.architecture"] = "gemma"
	kv["general.name"] = "gemma"
	kv["gemma.context_length"] = p.MaxPositionEmbeddings
	kv["gemma.embedding_length"] = p.HiddenSize
	kv["gemma.block_count"] = p.HiddenLayers
	kv["gemma.feed_forward_length"] = p.IntermediateSize
	kv["gemma.attention.head_count"] = p.NumAttentionHeads
	kv["gemma.attention.head_count_kv"] = p.NumKeyValueHeads
	kv["gemma.attention.layer_norm_rms_epsilon"] = p.RMSNormEPS
	kv["gemma.attention.key_length"] = p.HeadDim
	kv["gemma.attention.value_length"] = p.HeadDim
	kv["tokenizer.ggml.eot_token_id"] = uint32(107)
	kv["tokenizer.ggml.middle_token_id"] = uint32(68)
	kv["tokenizer.ggml.prefix_token_id"] = uint32(67)
	kv["tokenizer.ggml.suffix_token_id"] = uint32(69)
	return kv
}

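// Tensors converts the input tensors, marking norm weights for repacking
// (see addOne below) and passing everything else through unchanged.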
func (p *gemma) Tensors(ts []Tensor) []llm.Tensor {
	out := make([]llm.Tensor, 0, len(ts))
	for _, t := range ts {
		if strings.HasSuffix(t.Name(), "_norm.weight") {
			t.SetRepacker(p.addOne)
		}

		out = append(out, llm.Tensor{
			Name:     t.Name(),
			Kind:     t.Kind(),
			Shape:    t.Shape(),
			WriterTo: t,
		})
	}

	return out
}

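// Replacements returns old/new substring pairs used to rename Hugging Face
// tensor name prefixes to their GGUF equivalents.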
func (p *gemma) Replacements() []string {
	return []string{
		"model.embed_tokens", "token_embd",
		"model.norm", "output_norm",
		"model.layers", "blk",
		"input_layernorm", "attn_norm",
		"self_attn.q_proj", "attn_q",
		"self_attn.k_proj", "attn_k",
		"self_attn.v_proj", "attn_v",
		"self_attn.o_proj", "attn_output",
		"mlp.gate_proj", "ffn_gate",
		"mlp.down_proj", "ffn_down",
		"mlp.up_proj", "ffn_up",
		"post_attention_layernorm", "ffn_norm",
	}
}

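// addOne repacks a norm weight tensor by adding 1 to each element. Gemma
// checkpoints store RMSNorm weights as (w - 1), while the GGUF compute graph
// multiplies by the weight directly, so the offset is applied at convert time.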
func (*gemma) addOne(_ string, data []float32, shape []uint64) ([]float32, error) {
	n := tensor.New(tensor.WithShape(int(shape[0])), tensor.WithBacking(data))
	ones := tensor.Ones(tensor.Float32, int(shape[0]))

	n, err := n.Add(ones)
	if err != nil {
		return nil, err
	}

	ts, err := native.SelectF32(n, 0)
	if err != nil {
		return nil, err
	}

	var f32s []float32
	for _, t := range ts {
		f32s = append(f32s, t...)
	}

	return f32s, nil
}