package gptoss

import (
	"cmp"
	"math"
	"strings"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/fast"
	"github.com/ollama/ollama/ml/nn/rope"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

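// Transformer implements the gpt-oss architecture: a mixture-of-experts
// decoder whose blocks alternate between sliding-window and full causal
// attention (see the cache wiring in New).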
type Transformer struct {
	model.Base
	model.BytePairEncoding

	TokenEmbedding    *nn.Embedding      `gguf:"token_embd"`
	TransformerBlocks []TransformerBlock `gguf:"blk"`
	OutputNorm        *nn.RMSNorm        `gguf:"output_norm"`
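	// The alt tag falls back to the tied token embedding weights when the
	// GGUF has no separate output tensor.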
	Output            *nn.Linear         `gguf:"output,alt:token_embd"`

	Options
}

// Forward implements model.Model.
func (m *Transformer) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs)
	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))

	for i, block := range m.TransformerBlocks {
		m.Cache.SetLayer(i)
		if c, ok := m.Cache.(*kvcache.WrapperCache); ok {
			// Even layers use sliding window attention; odd layers use
			// full causal attention.
			c.SetLayerType(i % 2)
		}

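		// Only the final block trims the hidden states down to the
		// positions that need logits; earlier blocks pass nil so every
		// position flows through.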
		var outputs ml.Tensor
		if i == len(m.TransformerBlocks)-1 {
			outputs = batch.Outputs
		}

		hiddenStates = block.Forward(ctx, hiddenStates, positions, outputs, m.Cache, &m.Options)
	}

	hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.eps)
	return m.Output.Forward(ctx, hiddenStates), nil
}

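// Shift re-applies rotary embeddings to cached keys, rotating them by the
// per-position offsets in shift when the cache window slides.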
func (m *Transformer) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	return fast.RoPE(ctx, key, shift, m.headDim(), m.ropeBase, 1./m.ropeScale, m.RoPEOptions()...), nil
}

type Options struct {
	hiddenSize,
	numHeads,
	numKVHeads,
	keyLength,
	valueLength,
	numExperts,
	numExpertsUsed,
	originalContextLength int

	eps,
	ropeBase,
	ropeScale float32
}

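// RoPEOptions returns the NeoX-style rotary options shared by the attention
// blocks and by Shift, including the original-context-length scaling.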
func (o Options) RoPEOptions() []func(*rope.Options) {
	return []func(*rope.Options){
		rope.WithTypeNeoX(),
		rope.WithOriginalContextLength(o.originalContextLength),
		rope.WithExtrapolationFactor(1.),
		// NOTE: ggml sets this implicitly so there's no need to set it here
		// rope.WithAttentionFactor(0.1*float32(math.Log(float64(o.ropeScale))) + 1.0),
	}
}

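// headDim prefers the explicit key/value lengths from the config and falls
// back to hiddenSize/numHeads.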
func (o Options) headDim() int {
	return cmp.Or(o.keyLength, o.valueLength, o.hiddenSize/o.numHeads)
}

type TransformerBlock struct {
	Attention *AttentionBlock
	MLP       *MLPBlock
}

func (d *TransformerBlock) Forward(ctx ml.Context, hiddenStates, positions, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	hiddenStates = d.Attention.Forward(ctx, hiddenStates, positions, cache, opts)
	if outputs != nil {
		hiddenStates = hiddenStates.Rows(ctx, outputs)
	}

	hiddenStates = d.MLP.Forward(ctx, hiddenStates, opts)
	return hiddenStates
}

type AttentionBlock struct {
	Norm *nn.RMSNorm `gguf:"attn_norm"`

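	// Exactly one projection layout is populated from the GGUF: either a
	// fused attn_qkv tensor or separate attn_q/attn_k/attn_v tensors.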
	QKV *nn.Linear `gguf:"attn_qkv"`

	Query *nn.Linear `gguf:"attn_q"`
	Key   *nn.Linear `gguf:"attn_k"`
	Value *nn.Linear `gguf:"attn_v"`

	Output *nn.Linear `gguf:"attn_out,alt:attn_output"`
	Sinks  ml.Tensor  `gguf:"attn_sinks,alt:attn_sinks.weight"`
}

func (attn *AttentionBlock) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	batchSize := hiddenStates.Dim(1)

	residual := hiddenStates
	hiddenStates = attn.Norm.Forward(ctx, hiddenStates, opts.eps)

	var query, key, value ml.Tensor
	if attn.QKV != nil {
		qkv := attn.QKV.Forward(ctx, hiddenStates)
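		// Reshape to one head per column, then split along the head axis
		// into query (numHeads), key, and value (numKVHeads each) sections.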
		qkv = qkv.Reshape(ctx, opts.headDim(), -1, batchSize)
		chunks := qkv.ChunkSections(ctx, 1, opts.numHeads, opts.numKVHeads, opts.numKVHeads)
		query, key, value = chunks[0], chunks[1], chunks[2]
	} else {
		query = attn.Query.Forward(ctx, hiddenStates)
		query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize)

		key = attn.Key.Forward(ctx, hiddenStates)
		key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize)

		value = attn.Value.Forward(ctx, hiddenStates)
		value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize)
	}

	query = fast.RoPE(ctx, query, positions, opts.headDim(), opts.ropeBase, 1./opts.ropeScale, opts.RoPEOptions()...)
	key = fast.RoPE(ctx, key, positions, opts.headDim(), opts.ropeBase, 1./opts.ropeScale, opts.RoPEOptions()...)

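	// Attention sinks add a learned per-head logit to the softmax so a head
	// can park attention mass on no real token.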
	attention := nn.AttentionWithSinks(ctx, query, key, value, attn.Sinks, 1/math.Sqrt(float64(opts.headDim())), cache)
	attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), batchSize)
	return attn.Output.Forward(ctx, attention).Add(ctx, residual)
}

type MLPBlock struct {
	Norm   *nn.RMSNorm `gguf:"ffn_norm,alt:post_attention_norm"`
	Router *nn.Linear  `gguf:"ffn_gate_inp"`

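	// As with attention, only one layout is populated: a fused
	// ffn_gate_up_exps tensor or separate gate and up tensors.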
	GateUp *nn.LinearBatch `gguf:"ffn_gate_up_exps"`

	Gate *nn.LinearBatch `gguf:"ffn_gate_exps"`
	Up   *nn.LinearBatch `gguf:"ffn_up_exps"`

	Down *nn.LinearBatch `gguf:"ffn_down_exps"`
}

func (mlp *MLPBlock) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *Options) ml.Tensor {
	hiddenDim, sequenceLength, batchSize := hiddenStates.Dim(0), hiddenStates.Dim(1), hiddenStates.Dim(2)

	residual := hiddenStates
	hiddenStates = mlp.Norm.Forward(ctx, hiddenStates, opts.eps)

	hiddenStates = hiddenStates.Reshape(ctx, hiddenDim, sequenceLength*batchSize)
	routingWeights := mlp.Router.Forward(ctx, hiddenStates)

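	// Top-k routing: pick numExpertsUsed experts per token, then softmax
	// over just the selected router logits to get the mixing weights.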
	selectedExperts := routingWeights.TopK(ctx, opts.numExpertsUsed)
	routingWeights = routingWeights.Reshape(ctx, 1, opts.numExperts, sequenceLength*batchSize).Rows(ctx, selectedExperts)
	routingWeights = routingWeights.Reshape(ctx, opts.numExpertsUsed, sequenceLength*batchSize).Softmax(ctx)
	routingWeights = routingWeights.Reshape(ctx, 1, opts.numExpertsUsed, sequenceLength*batchSize)

	hiddenStates = hiddenStates.Reshape(ctx, hiddenStates.Dim(0), 1, hiddenStates.Dim(1))

	var gate, up ml.Tensor
	if mlp.GateUp != nil {
		hiddenStates = mlp.GateUp.Forward(ctx, hiddenStates, selectedExperts)
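		// gate and up values are interleaved along dim 0, so de-interleave
		// them with stride-2 slices.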
		gate = hiddenStates.Slice(ctx, 0, 0, hiddenStates.Dim(0), 2)
		up = hiddenStates.Slice(ctx, 0, 1, hiddenStates.Dim(0), 2)
	} else {
		gate = mlp.Gate.Forward(ctx, hiddenStates, selectedExperts)
		up = mlp.Up.Forward(ctx, hiddenStates, selectedExperts)
	}

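	// Gated activation with the gpt-oss constants: alpha = 1.702 scales the
	// sigmoid and 7 caps the activations before gating.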
	hiddenStates = gate.SILUAlphaLimit(ctx, up, 1.702, 7)

	experts := mlp.Down.Forward(ctx, hiddenStates, selectedExperts)
	experts = experts.Mul(ctx, routingWeights)

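	// Collapse the expert axis: start from expert 0's view and accumulate
	// the remaining weighted expert outputs.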
	nextStates := experts.View(ctx, 0, experts.Dim(0), experts.Stride(2), experts.Dim(2))
	for i := 1; i < opts.numExpertsUsed; i++ {
		nextStates = nextStates.Add(ctx, experts.View(ctx, i*experts.Stride(1), experts.Dim(0), experts.Stride(2), experts.Dim(2)))
	}

	return nextStates.Add(ctx, residual)
}

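// New constructs a gpt-oss Transformer from GGUF config metadata.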
func New(c fs.Config) (model.Model, error) {
	m := Transformer{
		TransformerBlocks: make([]TransformerBlock, c.Uint("block_count")),
		BytePairEncoding: model.NewBytePairEncoding(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				Merges: c.Strings("tokenizer.ggml.merges"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", false),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
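			// Pretokenizer split pattern: contractions, letter runs, 1-3
			// digit groups, punctuation, newlines, and runs of whitespace.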
			strings.Join([]string{
				`[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+(?i:'s|'t|'re|'ve|'m|'ll|'d)?`,
				`[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*(?i:'s|'t|'re|'ve|'m|'ll|'d)?`,
				`\p{N}{1,3}`,
				` ?[^\s\p{L}\p{N}]+[\r\n/]*`,
				`\s*[\r\n]+`,
				`\s+(?!\S)`,
				`\s+`,
			}, "|"),
		),
		Options: Options{
			hiddenSize:            int(c.Uint("embedding_length")),
			numHeads:              int(c.Uint("attention.head_count")),
			numKVHeads:            int(c.Uint("attention.head_count_kv")),
			keyLength:             int(c.Uint("attention.key_length")),
			valueLength:           int(c.Uint("attention.value_length")),
			numExperts:            int(c.Uint("expert_count")),
			numExpertsUsed:        int(c.Uint("expert_used_count")),
			eps:                   c.Float("attention.layer_norm_rms_epsilon"),
			ropeBase:              c.Float("rope.freq_base"),
			ropeScale:             c.Float("rope.scaling.factor", 1.),
			originalContextLength: int(c.Uint("rope.scaling.original_context_length")),
		},
	}

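	// Layer type 0 is sliding-window attention and type 1 is full causal
	// attention, matching the even/odd alternation in Forward.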
	m.Cache = kvcache.NewWrapperCache(
		kvcache.NewSWAMemCache(int32(c.Uint("attention.sliding_window")), 4096, m.Shift),
		kvcache.NewCausalCache(m.Shift),
	)
	return &m, nil
}

func init() {
	model.Register("gptoss", New)
	model.Register("gpt-oss", New)
}