OpenDAS / ollama · commit 1188f408 (unverified)

s/From*Slice/From*s/ (#12255)

Authored by Michael Yang on Oct 28, 2025; committed via GitHub on Oct 28, 2025.
Parent: 15c7d30d
Changes: 24 files in the commit; shown below are the 4 changed files on this page, with 9 additions and 9 deletions.
Files changed on this page:

model/models/qwen25vl/model_vision.go    +4 -4
model/models/qwen3/model.go              +1 -1
runner/ollamarunner/multimodal.go        +1 -1
runner/ollamarunner/runner.go            +3 -3
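The rename is purely mechanical: the helpers on the input context (ctx.Input()) that build a tensor from a Go slice lose the "Slice" suffix, so FromFloatSlice becomes FromFloats and FromIntSlice becomes FromInts, with argument lists unchanged. As a rough sketch only, using stand-in types that are not the repository's ml.Context or ml.Tensor and signatures merely inferred from the call sites in this page, the new spellings are used like this:

package main

import "fmt"

// Tensor and InputContext are illustrative stand-ins; the real code obtains
// the context via ctx.Input() on an ml.Context and returns an ml.Tensor.
type Tensor struct {
	shape []int
}

type InputContext struct{}

// FromFloats mirrors the renamed helper (formerly FromFloatSlice): a flat
// []float32 plus the tensor dimensions. Signature inferred from this diff.
func (InputContext) FromFloats(values []float32, shape ...int) Tensor {
	return Tensor{shape: shape}
}

// FromInts mirrors the renamed helper (formerly FromIntSlice).
func (InputContext) FromInts(values []int32, shape ...int) Tensor {
	return Tensor{shape: shape}
}

func main() {
	ctx := InputContext{}

	seqLength := 4
	flat := make([]float32, seqLength*seqLength)
	// Previously: ctx.FromFloatSlice(flat, seqLength, seqLength)
	mask := ctx.FromFloats(flat, seqLength, seqLength)
	fmt.Println("mask shape:", mask.shape)

	positions := []int32{0, 1, 2, 3}
	// Previously: ctx.FromIntSlice(positions, len(positions))
	p := ctx.FromInts(positions, len(positions))
	fmt.Println("positions shape:", p.shape)
}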
model/models/qwen25vl/model_vision.go

@@ -43,7 +43,7 @@ func blockDiagonalMask(ctx ml.Context, seqLength int, bounds []int, numHeads int
 		}
 	}
 
-	mask := ctx.Input().FromFloatSlice(flat, seqLength, seqLength)
+	mask := ctx.Input().FromFloats(flat, seqLength, seqLength)
 
 	// Reshape to match [seqLength, seqLength, 1] for broadcasting
 	mask = mask.Reshape(ctx, seqLength, seqLength, 1)
@@ -299,7 +299,7 @@ func (m *VisionModel) WindowIndex(ctx ml.Context, grid *Grid) (ml.Tensor, []int)
 		}
 	}
 
-	t := ctx.Input().FromIntSlice(index, len(index))
+	t := ctx.Input().FromInts(index, len(index))
 
 	return t, bounds
 }
@@ -319,7 +319,7 @@ func (m *VisionModel) PositionalEmbedding(ctx ml.Context, grid *Grid) ml.Tensor
 			freqVals[i*freq+j] = float32(i) / float32(math.Pow(theta, float64(j*2)/float64(dim)))
 		}
 	}
-	freqs := ctx.Input().FromFloatSlice(freqVals, freq, maxGridSize)
+	freqs := ctx.Input().FromFloats(freqVals, freq, maxGridSize)
 
 	// Create position coordinates (y,x pairs) for the grid
 	// In PyTorch: Equivalent to generating position ids with torch.arange()
@@ -329,7 +329,7 @@ func (m *VisionModel) PositionalEmbedding(ctx ml.Context, grid *Grid) ml.Tensor
 			coords = append(coords, int32(y), int32(x))
 		}
 	}
-	pos := ctx.Input().FromIntSlice(coords, 2, grid.Width, grid.Height)
+	pos := ctx.Input().FromInts(coords, 2, grid.Width, grid.Height)
 
 	// Reshape and permute positions to match spatial merging pattern
 	pos = pos.Reshape(ctx, 2, grid.Width, merge, grid.Height/merge)
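In the first hunk above, the bodies of the loops that fill flat are collapsed in this view; only the closing braces and the FromFloats call are visible. The sketch below is a plausible reconstruction of what such a block-diagonal mask buffer contains (a common convention: 0 where positions inside the same sequence block, delimited by bounds, may attend to each other, and negative infinity everywhere else), written as plain Go without the ml package. It is an illustration, not the code from model_vision.go.

package main

import (
	"fmt"
	"math"
)

// blockDiagonalFlat builds a row-major seqLength*seqLength buffer of the kind
// that would be handed to FromFloats(flat, seqLength, seqLength): 0 inside
// each [bounds[b-1], bounds[b]) block, -Inf for every other pair.
func blockDiagonalFlat(seqLength int, bounds []int) []float32 {
	flat := make([]float32, seqLength*seqLength)
	negInf := float32(math.Inf(-1))
	for i := range flat {
		flat[i] = negInf
	}
	for b := 1; b < len(bounds); b++ {
		start, end := bounds[b-1], bounds[b]
		for row := start; row < end; row++ {
			for col := start; col < end; col++ {
				flat[row*seqLength+col] = 0
			}
		}
	}
	return flat
}

func main() {
	// Two sequences packed together: rows 0-1 and rows 2-3.
	flat := blockDiagonalFlat(4, []int{0, 2, 4})
	for row := 0; row < 4; row++ {
		fmt.Println(flat[row*4 : (row+1)*4])
	}
}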
model/models/qwen3/model.go

@@ -181,7 +181,7 @@ func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
 
 // Forward implements model.Model.
 func (m *Model) forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
-	positions := ctx.Input().FromIntSlice(batch.Positions, len(batch.Positions))
+	positions := ctx.Input().FromInts(batch.Positions, len(batch.Positions))
 
 	hiddenStates := m.TokenEmbedding.Forward(ctx, batch.Inputs)
runner/ollamarunner/multimodal.go

@@ -102,7 +102,7 @@ func (m multimodalStore) getTensor(backend ml.Backend, ctx ml.Context, in ml.Ten
 	for i, t := range entry.mm {
 		if in == t.Tensor {
 			if !reserve {
-				return ctx.Input().FromFloatSlice(entry.data[i], t.Tensor.Shape()...), nil
+				return ctx.Input().FromFloats(entry.data[i], t.Tensor.Shape()...), nil
 			} else {
 				return ctx.Input().Empty(t.Tensor.DType(), t.Tensor.Shape()...), nil
 			}
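The branch structure around the multimodal change stays the same: when the tensor data is actually needed, it is rebuilt from the cached []float32 via FromFloats (formerly FromFloatSlice); in the reserve path only an empty tensor of the same dtype and shape is allocated. Below is a stand-alone sketch of that reserve-versus-materialize split using hypothetical stand-in types, not the repository's ml package:

package main

import "fmt"

// Illustrative stand-ins; not the repository's ml.Tensor / ml.Context types.
type DType int

const DTypeF32 DType = iota

type Tensor struct {
	dtype DType
	shape []int
	data  []float32 // nil for "empty" (shape-only) tensors
}

func (t Tensor) DType() DType { return t.dtype }
func (t Tensor) Shape() []int { return t.shape }

type InputContext struct{}

func (InputContext) FromFloats(values []float32, shape ...int) Tensor {
	return Tensor{dtype: DTypeF32, shape: shape, data: values}
}

func (InputContext) Empty(dtype DType, shape ...int) Tensor {
	return Tensor{dtype: dtype, shape: shape}
}

// getTensor mirrors the branch in the diff: reuse the stored data unless we
// are only reserving memory, in which case shape and dtype are enough.
func getTensor(ctx InputContext, stored Tensor, data []float32, reserve bool) Tensor {
	if !reserve {
		return ctx.FromFloats(data, stored.Shape()...)
	}
	return ctx.Empty(stored.DType(), stored.Shape()...)
}

func main() {
	ctx := InputContext{}
	stored := Tensor{dtype: DTypeF32, shape: []int{2, 3}}
	data := []float32{1, 2, 3, 4, 5, 6}

	fmt.Println(getTensor(ctx, stored, data, false).data) // real data
	fmt.Println(getTensor(ctx, stored, data, true).data)  // nil: reserved only
}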
runner/ollamarunner/runner.go

@@ -599,7 +599,7 @@ func (s *Server) forwardBatch(pendingBatch batchState) (nextBatch batchState, er
 	// Actual batchInputs values will be injected into the batch.Inputs tensor before calling Compute
 	batch.Inputs = nextBatch.ctx.Input().Empty(ml.DTypeI32, len(batchInputs))
-	batch.Outputs = nextBatch.ctx.Input().FromIntSlice(batchOutputs, len(batchOutputs))
+	batch.Outputs = nextBatch.ctx.Input().FromInts(batchOutputs, len(batchOutputs))
 
 	nextBatch.modelOutput, err = model.Forward(nextBatch.ctx, s.model, batch)
 	if err != nil {
 		err = fmt.Errorf("failed to build graph: %w", err)
@@ -692,7 +692,7 @@ func (s *Server) computeBatch(activeBatch batchState) {
 	// At this point the seqs are ready for forwardBatch to move forward so unblock
 	s.mu.Unlock()
 
-	activeBatch.batch.Inputs.SetValueFromIntSlice(batchInputs)
+	activeBatch.batch.Inputs.FromInts(batchInputs)
 	activeBatch.ctx.ComputeWithNotify(
 		func() {
 			logutil.Trace("computeBatch: signaling computeStartedCh", "batchID", activeBatch.id)
@@ -1090,7 +1090,7 @@ func (s *Server) reserveWorstCaseGraph() error {
 		batch.Positions[i] = int32(i)
 	}
 
-	batch.Inputs = ctx.Input().FromIntSlice(batchInputs, len(batchInputs))
+	batch.Inputs = ctx.Input().FromInts(batchInputs, len(batchInputs))
 	batch.Outputs = ctx.Input().Empty(ml.DTypeI32, s.parallel)
 
 	cache := s.model.Config().Cache
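The three runner.go hunks are two halves of one pattern: at graph-build time, forwardBatch and reserveWorstCaseGraph either materialize a tensor immediately with FromInts or allocate an Empty DTypeI32 placeholder when the values are not yet known, and computeBatch later writes the real batchInputs into that placeholder just before ComputeWithNotify. The sketch below illustrates the allocate-now, fill-later idea with hypothetical types and a made-up Fill method; the actual injection in the diff goes through the renamed call on batch.Inputs shown above, whose exact signature is not reproduced here.

package main

import "fmt"

// I32Tensor, Empty, FromInts and Fill are hypothetical stand-ins for this
// sketch only; they are not the repository's ml types.
type I32Tensor struct {
	data []int32
}

// Empty allocates space for n int32 values without knowing them yet, so the
// compute graph can reference the tensor before it is filled.
func Empty(n int) *I32Tensor { return &I32Tensor{data: make([]int32, n)} }

// FromInts builds a tensor whose values are already known.
func FromInts(values []int32) *I32Tensor {
	t := &I32Tensor{data: make([]int32, len(values))}
	copy(t.data, values)
	return t
}

// Fill injects values into a previously Empty tensor just before compute.
func (t *I32Tensor) Fill(values []int32) { copy(t.data, values) }

func main() {
	batchInputs := []int32{11, 22, 33}
	batchOutputs := []int32{0, 2}

	inputs := Empty(len(batchInputs)) // placeholder at graph-build time
	outputs := FromInts(batchOutputs) // values known up front

	// ... graph would be built here, referencing inputs and outputs ...

	inputs.Fill(batchInputs) // inject real values just before compute
	fmt.Println(inputs.data, outputs.data)
}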