package convert

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/nlpodyssey/gopickle/pytorch"
	"github.com/pdevine/tensor"
	"github.com/pdevine/tensor/native"
	"github.com/x448/float16"

	"github.com/ollama/ollama/llm"
)

type LlamaModel struct {
	ModelData
}

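// llamaTorchLayerHandler writes out an attention q/k weight tensor from a
// torch checkpoint: it converts the storage to fp16 and repacks the rows
// into the layout gguf expects.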
func llamaTorchLayerHandler(w io.Writer, r torchWriterTo) error {
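	// gopickle decodes both half and bfloat16 storage to float32; convert
	// the values to raw IEEE-754 half-precision bits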
	var tData []uint16
	switch s := r.storage.(type) {
	case *pytorch.HalfStorage:
		tData = make([]uint16, len(s.Data))
		for cnt, v := range s.Data {
			tData[cnt] = uint16(float16.Fromfloat32(v))
		}
	case *pytorch.BFloat16Storage:
		tData = make([]uint16, len(s.Data))
		for cnt, v := range s.Data {
			tData[cnt] = uint16(float16.Fromfloat32(v))
		}
	default:
		return fmt.Errorf("unknown torch storage type %T", r.storage)
	}

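	// the repack group size is the head count: the attention head count for
	// q weights, the key/value head count (if set) for k weights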
	var heads uint32
	if strings.Contains(r.t.Name, "attn_q") {
		heads = uint32(r.params.AttentionHeads)
	} else if strings.Contains(r.t.Name, "attn_k") {
		heads = uint32(r.params.KeyValHeads)
		if heads == 0 {
			heads = uint32(r.params.AttentionHeads)
		}
	} else {
		return fmt.Errorf("unknown layer type for tensor %q", r.t.Name)
	}

	tData, err := llamaRepack(tData, int(heads), r.t.Shape)
	if err != nil {
		return err
	}

	return binary.Write(w, r.bo, tData)
}

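// llamaRepack reorders the rows of an attention q/k weight matrix, viewing
// them as (heads, 2, rows/heads/2) groups and swapping the two inner axes
// to produce the interleaved row layout gguf expects.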
func llamaRepack(data []uint16, heads int, shape []uint64) ([]uint16, error) {
	n := tensor.New(tensor.WithShape(int(shape[0]), int(shape[1])), tensor.WithBacking(data))
	origShape := n.Shape().Clone()

	// reshape the tensor and swap axes 1 and 2 to unpack the layer for gguf
	if err := n.Reshape(heads, 2, origShape[0]/heads/2, origShape[1]); err != nil {
		return nil, err
	}

	if err := n.T(0, 2, 1, 3); err != nil {
		return nil, err
	}

	if err := n.Reshape(origShape...); err != nil {
		return nil, err
	}

	if err := n.Transpose(); err != nil {
		return nil, err
	}
	newN, err := native.SelectU16(n, 1)
	if err != nil {
		return nil, err
	}

	var fullTensor []uint16
	for _, v := range newN {
		fullTensor = append(fullTensor, v...)
	}
	return fullTensor, nil
}

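// GetTensors collects the model's tensors and installs a repack handler on
// each attention q and k weight, whose rows must be reordered before they
// are written to the gguf file.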
func (m *LlamaModel) GetTensors() error {
	t, err := m.Format.GetTensors(m.Path, m.Params)
	if err != nil {
		return err
	}

	m.Tensors = []llm.Tensor{}

	pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
	re, err := regexp.Compile(pattern)
	if err != nil {
		return err
	}

	for _, l := range t {
		if re.MatchString(l.Name) {
			switch m.Format.(type) {
			case *TorchFormat:
				wt := l.WriterTo.(torchWriterTo)
				wt.handler = llamaTorchLayerHandler
				l.WriterTo = wt
			case *SafetensorFormat:
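				// safetensor q/k weights reuse mistral's repack handler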
				wt := l.WriterTo.(safetensorWriterTo)
				wt.handler = mistralLayerHandler
				l.WriterTo = wt
			}
		}
		m.Tensors = append(m.Tensors, l)
	}

	return nil
}

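// LoadVocab reads the vocabulary from tokenizer.json when present, falling
// back to the SentencePiece tokenizer otherwise.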
func (m *LlamaModel) LoadVocab() error {
	v := &Vocab{}

	tokpath := filepath.Join(m.Path, "tokenizer.json")
	pre, ts, merges, err := parseTokens(tokpath)
	if errors.Is(err, os.ErrNotExist) {
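		// tokenizer.json is missing; fall back to the SentencePiece tokenizer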
		v, err = LoadSentencePieceTokens(m.Path, m.Params)
		if err != nil {
			return err
		}
	} else if err != nil {
		return err
	} else {
		for _, t := range ts {
			v.Tokens = append(v.Tokens, t.Content)
			v.Types = append(v.Types, t.Type())
		}

		m.Params.PreTokenizer = pre
		v.Merges = merges
	}

	m.Vocab = v

	return nil
}

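// WriteGGUF encodes the model as a GGUF v3 file: the metadata key/value
// pairs first, followed by the tensor data.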
func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error {
	kv := llm.KV{
		"general.architecture":                   "llama",
		"general.name":                           m.Name,
		"llama.vocab_size":                       uint32(len(m.Vocab.Tokens)),
		"llama.context_length":                   uint32(m.Params.ContextSize),
		"llama.embedding_length":                 uint32(m.Params.HiddenSize),
		"llama.block_count":                      uint32(m.Params.HiddenLayers),
		"llama.feed_forward_length":              uint32(m.Params.IntermediateSize),
		"llama.rope.freq_base":                   float32(m.Params.RopeFrequencyBase),
		"llama.rope.dimension_count":             uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
		"llama.attention.head_count":             uint32(m.Params.AttentionHeads),
		"llama.attention.head_count_kv":          uint32(m.Params.KeyValHeads),
		"llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
		"general.file_type":                      uint32(2),
		"tokenizer.ggml.model":                   "gpt2",

		"tokenizer.ggml.pre":        m.Params.PreTokenizer,
		"tokenizer.ggml.tokens":     m.Vocab.Tokens,
		"tokenizer.ggml.token_type": m.Vocab.Types,

		"tokenizer.ggml.bos_token_id":     uint32(m.Params.BoSTokenID),
		"tokenizer.ggml.eos_token_id":     uint32(m.Params.EoSTokenID),
		"tokenizer.ggml.unknown_token_id": uint32(0),
	}

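	// a BPE vocabulary (tokenizer.json) carries merge rules, while a
	// SentencePiece vocabulary carries per-token scores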
	if len(m.Vocab.Merges) > 0 {
		kv["tokenizer.ggml.merges"] = m.Vocab.Merges
	} else {
		kv["tokenizer.ggml.scores"] = m.Vocab.Scores
	}

	return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
}