model.go

package llama

import (
	"cmp"
	"math"

	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/ml/nn"
	"github.com/ollama/ollama/ml/nn/rope"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

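// Options holds the llama hyperparameters read from GGUF metadata in New.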
type Options struct {
	hiddenSize, numHeads, numKVHeads int
	headDim, ropeDim                 int
	eps, ropeBase, ropeScale         float32
}

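// applyRotaryPositionEmbeddings applies RoPE to states at the given positions,
// using the first non-zero of ropeDim, headDim, or hiddenSize/numHeads as the
// rotation dimension, with optional per-frequency scaling factors.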
func (o Options) applyRotaryPositionEmbeddings(ctx ml.Context, states, positions, factors ml.Tensor) ml.Tensor {
	return nn.RoPE(ctx, states, positions, cmp.Or(o.ropeDim, o.headDim, o.hiddenSize/o.numHeads), o.ropeBase, 1./o.ropeScale, rope.WithFactors(factors))
}

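// Model is the llama text model: a token embedding, a stack of transformer
// blocks, and a final RMS norm plus output projection. The output projection
// falls back to the token embedding weights ("alt:token_embd") when the model
// has no separate output tensor.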
type Model struct {
	model.Base
	model.TextProcessor

	TokenEmbedding *nn.Embedding `gguf:"token_embd"`
	Layers         []Layer       `gguf:"blk"`
	OutputNorm     *nn.RMSNorm   `gguf:"output_norm"`
	Output         *nn.Linear    `gguf:"output,alt:token_embd"`

	Options
}

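// New constructs a llama Model from GGUF metadata: it selects a tokenizer (BPE
// or SentencePiece), sizes the layer stack, reads the attention and RoPE
// hyperparameters, and attaches a causal KV cache.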
func New(c fs.Config) (model.Model, error) {
	if c.Uint("expert_count") > 0 {
		// TODO: support mixtures of experts
		return nil, model.ErrUnsupportedModel
	}

	var processor model.TextProcessor
	vocabulary := model.Vocabulary{
		Values: c.Strings("tokenizer.ggml.tokens"),
		Scores: c.Floats("tokenizer.ggml.scores"),
		Types:  c.Ints("tokenizer.ggml.token_type"),
		Merges: c.Strings("tokenizer.ggml.merges"),
		AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
		BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
		AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
		EOS: append(
			[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
			c.Ints("tokenizer.ggml.eos_token_ids")...,
		),
	}
	switch c.String("tokenizer.ggml.model") {
	case "gpt2":
		var pretokenizers []string
		switch c.String("tokenizer.ggml.pre") {
		case "default":
			// no-op: use the default BPE pretokenizer
		case "qwen2":
			pretokenizers = []string{
				"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
			}
		case "refact":
			pretokenizers = []string{
				`\p{N}`,
				`'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+`,
			}
		case "tekken":
			pretokenizers = []string{
				"[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]*[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]+|[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]+[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
			}
		default:
			// use a llama-style pretokenizer
			pretokenizers = []string{
				"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
			}
		}
		processor = model.NewBytePairEncoding(&vocabulary, pretokenizers...)
	case "llama":
		processor = model.NewSentencePiece(&vocabulary)
	default:
		return nil, model.ErrUnsupportedTokenizer
	}

	m := Model{
		TextProcessor: processor,
		Layers:        make([]Layer, c.Uint("block_count")),
		Options: Options{
			hiddenSize: int(c.Uint("embedding_length")),
			numHeads:   int(c.Uint("attention.head_count")),
			numKVHeads: int(c.Uint("attention.head_count_kv")),
			headDim:    int(c.Uint("attention.key_length")),
			ropeDim:    int(c.Uint("rope.dimension_count")),
			eps:        c.Float("attention.layer_norm_rms_epsilon"),
			ropeBase:   c.Float("rope.freq_base", 1e5),
			ropeScale:  c.Float("rope.scaling.factor", 1),
		},
	}

	m.Cache = kvcache.NewCausalCache(m.Shift)

	return &m, nil
}

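// SelfAttention holds the per-layer attention projections and optional RoPE
// frequency factors.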
type SelfAttention struct {
	Query       *nn.Linear `gguf:"attn_q"`
	Key         *nn.Linear `gguf:"attn_k"`
	Value       *nn.Linear `gguf:"attn_v"`
	Output      *nn.Linear `gguf:"attn_output"`
	RopeFactors ml.Tensor  `gguf:"rope_freqs.weight"`
}

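// Forward computes self-attention for one layer: project the hidden state to
// queries, keys, and values (numKVHeads key/value heads for grouped-query
// attention), apply RoPE to queries and keys, attend through the KV cache, and
// project the result back to the hidden size.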
func (sa *SelfAttention) Forward(ctx ml.Context, hiddenState, positions ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	batchSize := hiddenState.Dim(1)
	headDim := cmp.Or(opts.headDim, opts.hiddenSize/opts.numHeads)

	query := sa.Query.Forward(ctx, hiddenState)
	query = query.Reshape(ctx, headDim, opts.numHeads, batchSize)

	key := sa.Key.Forward(ctx, hiddenState)
	key = key.Reshape(ctx, headDim, opts.numKVHeads, batchSize)

	value := sa.Value.Forward(ctx, hiddenState)
	value = value.Reshape(ctx, headDim, opts.numKVHeads, batchSize)

	query = opts.applyRotaryPositionEmbeddings(ctx, query, positions, sa.RopeFactors)
	key = opts.applyRotaryPositionEmbeddings(ctx, key, positions, sa.RopeFactors)

	attention := nn.Attention(ctx, query, key, value, 1.0/math.Sqrt(float64(headDim)), cache)
	attention = attention.Reshape(ctx, headDim*opts.numHeads, batchSize)
	return sa.Output.Forward(ctx, attention)
}

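// Shift re-applies rotary position embeddings to cached keys when the causal
// cache shifts token positions.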
func (m *Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
	return m.applyRotaryPositionEmbeddings(ctx, key, shift, m.Layers[layer].SelfAttention.RopeFactors), nil
}

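// MLP is the gated (SwiGLU-style) feed-forward block.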
type MLP struct {
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
	Gate *nn.Linear `gguf:"ffn_gate"`
}

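// Forward applies the gated activation (SiLU of the gate projection combined
// with the up projection), followed by the down projection.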
func (mlp *MLP) Forward(ctx ml.Context, hiddenState ml.Tensor, opts *Options) ml.Tensor {
	hiddenState = mlp.Gate.Forward(ctx, hiddenState).SILU(ctx, mlp.Up.Forward(ctx, hiddenState))
	return mlp.Down.Forward(ctx, hiddenState)
}

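// Layer is a single transformer block: pre-norm self-attention and a pre-norm
// MLP, each wrapped in a residual connection.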
type Layer struct {
	AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
	SelfAttention *SelfAttention
	MLPNorm       *nn.RMSNorm `gguf:"ffn_norm"`
	MLP           *MLP
}

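// Forward runs one transformer block over hiddenState; outputs, when non-nil,
// selects the positions that need logits on the final layer.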
func (l *Layer) Forward(ctx ml.Context, hiddenState, positions, outputs ml.Tensor, cache kvcache.Cache, opts *Options) ml.Tensor {
	residual := hiddenState

	hiddenState = l.AttentionNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.SelfAttention.Forward(ctx, hiddenState, positions, cache, opts)

	// In the final layer (outputs != nil), optimize by pruning to just the token positions
	// we need logits for.
	if outputs != nil {
		hiddenState = hiddenState.Rows(ctx, outputs)
		residual = residual.Rows(ctx, outputs)
	}

	hiddenState = hiddenState.Add(ctx, residual)
	residual = hiddenState

	hiddenState = l.MLPNorm.Forward(ctx, hiddenState, opts.eps)
	hiddenState = l.MLP.Forward(ctx, hiddenState, opts)
	return hiddenState.Add(ctx, residual)
}

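// Forward embeds the batch tokens, runs every layer against the KV cache, and
// returns logits from the output projection after the final RMS norm.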
func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))

	hiddenState := m.TokenEmbedding.Forward(ctx, batch.Inputs)

	for i, layer := range m.Layers {
		m.Cache.SetLayer(i)

		var outputs ml.Tensor
		if i == len(m.Layers)-1 {
			outputs = batch.Outputs
		}

		hiddenState = layer.Forward(ctx, hiddenState, positions, outputs, m.Cache, &m.Options)
	}

	hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps)
	return m.Output.Forward(ctx, hiddenState), nil
}

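// init registers this implementation under the architecture name "llama".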
func init() {
	model.Register("llama", New)
}