0021-decode-disable-output_all.patch
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <git@mxy.ng>
Date: Mon, 18 Aug 2025 16:58:39 -0700
Subject: [PATCH] decode: disable output_all

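Upstream marks every token for output when cparams.embeddings is set
(the comment removed below). Force output_all to false so that batch
initialization only outputs the tokens the caller explicitly flags.
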
---
 src/llama-context.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/llama-context.cpp b/src/llama-context.cpp
index bd348bcad..8b4a89d38 100644
--- a/src/llama-context.cpp
+++ b/src/llama-context.cpp
@@ -974,8 +974,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
     const int64_t n_vocab = vocab.n_tokens();
     const int64_t n_embd  = hparams.n_embd;
 
-    // when computing embeddings, all tokens are output
-    const bool output_all = cparams.embeddings;
+    const bool output_all = false;
 
     if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, output_all)) {
         LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
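
For context, a minimal caller-side sketch of what this change implies,
assuming the upstream llama.h batch API (llama_batch_init, llama_batch_free,
and the llama_batch fields come from upstream llama.cpp; n_tokens and
tokens[] are placeholder inputs, not part of this patch): with output_all
forced to false, a caller in embeddings mode can no longer rely on decode()
outputting every token and must set the per-token output flag itself.

    // Sketch only: build a batch that requests output for one token.
    llama_batch batch = llama_batch_init(n_tokens, /*embd=*/0, /*n_seq_max=*/1);
    batch.n_tokens = n_tokens;
    for (int32_t i = 0; i < n_tokens; ++i) {
        batch.token[i]     = tokens[i];
        batch.pos[i]       = i;
        batch.n_seq_id[i]  = 1;
        batch.seq_id[i][0] = 0;
        // output_all no longer covers this: flag outputs explicitly,
        // here for the final token only.
        batch.logits[i]    = (i == n_tokens - 1);
    }
    // pass batch to llama_decode(ctx, batch), then llama_batch_free(batch)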