package convert

import (
	"strings"

	"github.com/ollama/ollama/fs/ggml"
	"github.com/pdevine/tensor"
	"github.com/pdevine/tensor/native"
)

type mllamaModel struct {
	ModelParameters
	TextModel struct {
		llamaModel

		CrossAttentionLayers []int32 `json:"cross_attention_layers"`
	} `json:"text_config"`
	VisionModel struct {
		NumHiddenLayers           uint32  `json:"num_hidden_layers"`
		NumGlobalLayers           uint32  `json:"num_global_layers"`
		IntermediateLayersIndices []int32 `json:"intermediate_layers_indices"`

		HiddenSize       uint32 `json:"hidden_size"`
		IntermediateSize uint32 `json:"intermediate_size"`

		AttentionHeads uint32 `json:"attention_heads"`

		ImageSize   uint32  `json:"image_size"`
		PatchSize   uint32  `json:"patch_size"`
		NumChannels uint32  `json:"num_channels"`
		MaxNumTiles uint32  `json:"max_num_tiles"`
		NormEpsilon float32 `json:"norm_eps"`
		RopeTheta   float32 `json:"rope.freq_base"`
	} `json:"vision_config"`
}

// KV returns the GGUF metadata for the converted model, combining the embedded
// text model's keys with the vision encoder's hyperparameters.
func (m *mllamaModel) KV(t *Tokenizer) ggml.KV {
	kv := m.ModelParameters.KV(t)
	kv["general.architecture"] = "mllama"

	for k, v := range m.TextModel.KV(t) {
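		// re-prefix the text model's "llama." keys as "mllama."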
		if strings.HasPrefix(k, "llama.") {
			kv[strings.ReplaceAll(k, "llama.", "mllama.")] = v
		}
	}

	kv["mllama.attention.cross_attention_layers"] = m.TextModel.CrossAttentionLayers

	kv["mllama.vision.block_count"] = m.VisionModel.NumHiddenLayers
	kv["mllama.vision.global.block_count"] = m.VisionModel.NumGlobalLayers
	kv["mllama.vision.intermediate_layers_indices"] = m.VisionModel.IntermediateLayersIndices

	kv["mllama.vision.embedding_length"] = m.VisionModel.HiddenSize
	kv["mllama.vision.feed_forward_length"] = m.VisionModel.IntermediateSize

	kv["mllama.vision.attention.head_count"] = m.VisionModel.AttentionHeads
	kv["mllama.vision.attention.layer_norm_epsilon"] = m.VisionModel.NormEpsilon

	kv["mllama.vision.image_size"] = m.VisionModel.ImageSize
	kv["mllama.vision.patch_size"] = m.VisionModel.PatchSize
	kv["mllama.vision.max_num_tiles"] = m.VisionModel.MaxNumTiles
	kv["mllama.vision.num_channels"] = m.VisionModel.NumChannels

	return kv
}

// Replacements extends the embedded llamaModel's tensor name replacements with
// the renames needed for the vision and cross-attention tensors.
func (m *mllamaModel) Replacements() []string {
	return append(
		m.TextModel.Replacements(),
		"language_model.", "",
		"gate_attn", "attn_gate",
		"gate_ffn", "ffn_gate",
		"cross_attn.", "cross_attn_",
		"vision_model", "v",
		"class_embedding", "class_embd",
		"patch_embedding", "patch_embd",
		"gated_positional_embedding.tile_embedding", "tile_position_embd",
		"gated_positional_embedding.embedding", "position_embd.weight",
		"gated_positional_embedding", "position_embd",
		"embedding.weight", "weight",
		"pre_tile_positional_embedding", "pre_tile_position_embd",
		"post_tile_positional_embedding", "post_tile_position_embd",
		"layernorm_pre", "pre_ln",
		"layernorm_post", "post_ln",
		"global_transformer.layers", "global.blk",
		"transformer.layers", "blk",
		"mlp.fc1", "ffn_up",
		"mlp.fc2", "ffn_down",
		"multi_modal_projector", "mm.0",
	)
}

// Tensors converts the vision ("v.") and projector ("mm.") tensors here and
// defers all remaining tensors to the embedded llamaModel.
func (m *mllamaModel) Tensors(ts []Tensor) []*ggml.Tensor {
	var out []*ggml.Tensor
	var text []Tensor
	for _, t := range ts {
		if !strings.HasPrefix(t.Name(), "v.") && !strings.HasPrefix(t.Name(), "mm.") {
			text = append(text, t)
		} else if t.Name() == "v.position_embd.gate" {
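			// write the single gate tensor out twice, once for the position embedding
			// and once for the tile position embedding, each with its own repacker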
			for _, name := range []string{"v.position_embd.gate", "v.tile_position_embd.gate"} {
				tt := t.Clone()
				tt.SetRepacker(m.repack(name))
				out = append(out, &ggml.Tensor{
					Name:     name,
					Kind:     t.Kind(),
					Shape:    t.Shape(),
					WriterTo: tt,
				})
			}
		} else {
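			// tile position gates, attention q/k weights, and attention/ffn gates need
			// their data rewritten on the way out; everything else is copied as-is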
			if t.Name() == "v.pre_tile_position_embd.gate" || t.Name() == "v.post_tile_position_embd.gate" {
				t.SetRepacker(m.repack(t.Name()))
			} else if strings.HasSuffix(t.Name(), "attn_q.weight") || strings.HasSuffix(t.Name(), "attn_k.weight") {
				t.SetRepacker(m.repack(t.Name()))
			} else if strings.HasSuffix(t.Name(), "attn_gate") || strings.HasSuffix(t.Name(), "ffn_gate") {
				t.SetRepacker(m.repack(t.Name()))
			}

			out = append(out, &ggml.Tensor{
				Name:     t.Name(),
				Kind:     t.Kind(),
				Shape:    t.Shape(),
				WriterTo: t,
			})
		}
	}

	return append(out, m.TextModel.Tensors(text)...)
}

// repack returns a Repacker that rewrites tensor data at write time: attention
// q/k weights are permuted and gate tensors are converted to their activated form.
func (m *mllamaModel) repack(name string) Repacker {
	return func(_ string, data []float32, shape []uint64) (_ []float32, err error) {
		dims := make([]int, len(shape))
		for i, dim := range shape {
			dims[i] = int(dim)
		}

		var t tensor.Tensor = tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data))

		if strings.HasSuffix(name, "attn_q.weight") || strings.HasSuffix(name, "attn_k.weight") {
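			// permute the rows of each attention head: reshape into
			// (heads, 2, rows/heads/2, ...), swap the middle axes, restore the
			// original shape, then transpose the matrix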
			heads := m.VisionModel.AttentionHeads
			if err := t.Reshape(append([]int{int(heads), 2, dims[0] / int(heads) / 2}, dims[1:]...)...); err != nil {
				return nil, err
			}

			if err := t.T(0, 2, 1, 3); err != nil {
				return nil, err
			}

			if err := t.Reshape(dims...); err != nil {
				return nil, err
			}

			if err := t.Transpose(); err != nil {
				return nil, err
			}
		} else {
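			// gate tensors have tanh applied at conversion time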
			t, err = tensor.Tanh(t)
			if err != nil {
				return nil, err
			}

			if name == "v.position_embd.gate" {
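				// the position embedding gate is written as 1 - tanh(gate); its tile
				// position embedding copy keeps tanh(gate)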
				t, err = tensor.Sub(float32(1), t)
				if err != nil {
					return nil, err
				}
			}
		}

		t = tensor.Materialize(t)
		// flatten the tensor so it can be returned as a vector
		if err := t.Reshape(t.Shape().TotalSize()); err != nil {
			return nil, err
		}

		return native.VectorF32(t.(*tensor.Dense))
	}
}