package gemma2

import (
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/rope"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

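// Options holds the Gemma 2 hyperparameters read from the model's GGUF
// metadata, including the attention and final logit softcapping values.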
type Options struct {
	hiddenSize, numHeads, numKVHeads int
	attnKeyLen, attnValLen           int
	eps, ropeBase, ropeScale         float32
	attnLogitSoftcap                 float32
	finalLogitSoftcap                float32
	largeModelScaling                bool
}

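// applyRotaryPositionEmbeddings applies NeoX-style rotary position embeddings
// to the query or key states using the configured frequency base and scale.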
func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions ml.Tensor) ml.Tensor {
	return nn.RoPE(ctx, states, positions, o.attnKeyLen, o.ropeBase, 1./o.ropeScale, rope.WithTypeNeoX())
}

type Model struct {
	model.Base
	model.SentencePiece

	TokenEmbedding *nn.Embedding `gguf:"token_embd"`
	Layers         []Layer       `gguf:"blk"`
	OutputNorm     *nn.RMSNorm   `gguf:"output_norm"`
	Output         *nn.Linear    `gguf:"output,alt:token_embd"` // just set to token_embd?

	*Options
}

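// The 27B variant is identified by its layer count and uses a different
// attention query scale (see largeModelScaling).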
const (
	gemma27BLayerCount = 46
)

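// New constructs a Gemma 2 model from GGUF metadata: it builds the
// SentencePiece tokenizer, allocates the transformer layers, and reads the
// attention, RoPE, and softcapping hyperparameters.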
func New(c fs.Config) (model.Model, error) {
	m := Model{
		SentencePiece: model.NewSentencePiece(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Scores: c.Floats("tokenizer.ggml.scores"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
		),
		Layers: make([]Layer, c.Uint("block_count")),
		Options: &Options{
			hiddenSize:        int(c.Uint("embedding_length")),
			numHeads:          int(c.Uint("attention.head_count")),
			numKVHeads:        int(c.Uint("attention.head_count_kv")),
			attnKeyLen:        int(c.Uint("attention.key_length")),
			attnValLen:        int(c.Uint("attention.value_length")),
			eps:               c.Float("attention.layer_norm_rms_epsilon"),
			ropeBase:          c.Float("rope.freq_base", 10000.0),
			ropeScale:         c.Float("rope.scaling.factor", 1.0),
			attnLogitSoftcap:  c.Float("attn_logit_softcapping"),
			finalLogitSoftcap: c.Float("final_logit_softcapping"),
		},
	}

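	// Gemma 2 alternates sliding-window and global attention layers, so wrap
	// both cache types; Forward selects the appropriate one per layer.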
	slidingWindowLen := int32(c.Uint("attention.sliding_window"))
	m.Cache = kvcache.NewWrapperCache(kvcache.NewSWACache(slidingWindowLen, m.Shift), kvcache.NewCausalCache(m.Shift))
	m.Cache.SetConfig(ml.CacheConfig{})

	return &m, nil
}

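// SelfAttention holds the query, key, value, and output projections for one
// attention block.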
type SelfAttention struct {
	Query  *nn.Linear `gguf:"attn_q"`
	Key    *nn.Linear `gguf:"attn_k"`
	Value  *nn.Linear `gguf:"attn_v"`
	Output *nn.Linear `gguf:"attn_output"`
}

func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positionIDs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	batchSize := hiddenState.Dim(1)

	q := sa.Query.Forward(ctx, hiddenState)
	q = q.Reshape(ctx, opts.attnKeyLen, opts.numHeads, batchSize)
	q = opts.applyRotaryPositionEmbeddings(ctx, q, positionIDs)

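	// The 27B model scales queries by 1/sqrt(hiddenSize/numHeads); smaller
	// variants use 1/sqrt(attnKeyLen).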
	if opts.largeModelScaling {
		q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.hiddenSize/opts.numHeads)))
	} else {
		q = q.Scale(ctx, 1.0/math.Sqrt(float64(opts.attnKeyLen)))
	}

	k := sa.Key.Forward(ctx, hiddenState)
	k = k.Reshape(ctx, opts.attnKeyLen, opts.numKVHeads, batchSize)
	k = opts.applyRotaryPositionEmbeddings(ctx, k, positionIDs)

	v := sa.Value.Forward(ctx, hiddenState)
	v = v.Reshape(ctx, opts.attnValLen, opts.numKVHeads, batchSize)

	cache.Put(ctx, k, v)
	k, v, mask := cache.Get(ctx)

	q = q.Permute(ctx, 0, 2, 1, 3)
	k = k.Permute(ctx, 0, 2, 1, 3)
	v = v.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)

	kq := k.Mulmat(ctx, q)

	// Attention logit softcapping: squash the scores into
	// (-attnLogitSoftcap, attnLogitSoftcap) via a scaled tanh.
	kq = kq.Scale(ctx, 1.0/float64(opts.attnLogitSoftcap))
	kq = kq.Tanh(ctx)
	kq = kq.Scale(ctx, float64(opts.attnLogitSoftcap))

	kq = kq.Add(ctx, mask)
	kq = kq.Softmax(ctx)

	kqv := v.Mulmat(ctx, kq)
	kqv = kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
	kqv = kqv.Reshape(ctx, opts.attnValLen*opts.numHeads, batchSize)

	return sa.Output.Forward(ctx, kqv)
}

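// Shift re-applies rotary position embeddings to cached keys when the
// KV cache window slides.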
func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	return m.applyRotaryPositionEmbeddings(ctx, key, shift), nil
}

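// MLP is the gated (GeGLU) feed-forward block: the GELU-activated gate
// projection modulates the up projection before the down projection.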
type MLP struct {
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
	Gate *nn.Linear `gguf:"ffn_gate"`
}

func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *Options) ml.Tensor {
	hiddenState = mlp.Gate.Forward(ctx, hiddenState).GELU(ctx, mlp.Up.Forward(ctx, hiddenState))
	return mlp.Down.Forward(ctx, hiddenState)
}

type Layer struct {
	AttentionNorm     *nn.RMSNorm `gguf:"attn_norm"`
	SelfAttention     *SelfAttention
	PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"`
	MLPNorm           *nn.RMSNorm `gguf:"ffn_norm"`
	MLP               *MLP
	PostMLPNorm       *nn.RMSNorm `gguf:"post_ffw_norm"`
}

func (l *Layer) Forward(ctx ml.Context, hiddenState, positionIDs, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	residual := hiddenState

	hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positionIDs, cache, opts)
	hiddenState = l.PostAttentionNorm.Forward(ctx, hiddenState, opts.eps)

	// In the final layer (outputs != nil), optimize by pruning to just the token positions
	// we need logits for.
	if outputs != nil {
		hiddenState = hiddenState.Rows(ctx, outputs)
		residual = residual.Rows(ctx, outputs)
	}

	hiddenState = hiddenState.Add(ctx, residual)
	residual = hiddenState

	hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.MLP.Forward(ctx, hiddenState, opts)
	hiddenState = l.PostMLPNorm.Forward(ctx, hiddenState, opts.eps)
	return hiddenState.Add(ctx, residual)
}

func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))

	hiddenState := m.TokenEmbedding.Forward(ctx, batch.Inputs)
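	// Scale token embeddings by sqrt(hiddenSize), as in the Gemma architecture.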
	hiddenState = hiddenState.Scale(ctx, math.Sqrt(float64(m.Options.hiddenSize)))

	if len(m.Layers) == gemma27BLayerCount {
		m.Options.largeModelScaling = true
	}

	for i, layer := range m.Layers {
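		// Layer type 0 selects the sliding-window cache and 1 the global causal
		// cache, matching the order passed to NewWrapperCache above.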
		cacheType := i % 2
		m.Cache.SetLayer(i)
		wc := m.Cache.(*kvcache.WrapperCache)
		wc.SetLayerType(cacheType)

		var lastLayerOutputs ml.Tensor
		if i == len(m.Layers)-1 {
			lastLayerOutputs = batch.Outputs
		}

		hiddenState = layer.Forward(ctx, hiddenState, positions, lastLayerOutputs, m.Cache, m.Options)
	}

	hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps)
	hiddenState = m.Output.Forward(ctx, hiddenState)

	// Final logit softcapping: squash the output logits into
	// (-finalLogitSoftcap, finalLogitSoftcap) via a scaled tanh.
	hiddenState = hiddenState.Scale(ctx, 1.0/float64(m.Options.finalLogitSoftcap))
	hiddenState = hiddenState.Tanh(ctx)
	return hiddenState.Scale(ctx, float64(m.Options.finalLogitSoftcap)), nil
}

func init() {
	model.Register("gemma2", New)
}