use super::traits::{TokenIdType, Tokenizer as TokenizerTrait};
use anyhow::Result;
use std::sync::Arc;

/// Maintains state for an ongoing sequence of tokens and their decoded text.
/// This provides an abstraction for incrementally decoding a token stream
/// without re-emitting text that has already been returned.
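///
/// # Example
///
/// A minimal streaming sketch, assuming some `tokenizer: Arc<dyn Tokenizer>`
/// has already been constructed (marked `ignore` because building a tokenizer
/// is backend-specific):
///
/// ```ignore
/// let mut seq = Sequence::new(tokenizer);
/// for token_id in generated_token_ids {
///     // Emit only the text newly completed by this token.
///     print!("{}", seq.append_token(token_id)?);
/// }
/// ```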
pub struct Sequence {
    /// The tokenizer used for encoding/decoding
    tokenizer: Arc<dyn TokenizerTrait>,

    /// The current sequence of token ids
    token_ids: Vec<TokenIdType>,

    /// Offset into the sequence where the prefix used for incremental decoding begins
    prefix_offset: usize,

    /// Offset into the sequence up to which tokens have already been decoded
    read_offset: usize,

    /// Whether to skip special tokens when decoding
    skip_special_tokens: bool,
}

impl std::fmt::Debug for Sequence {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Sequence")
            .field("tokenizer", &"Arc<dyn Tokenizer>")
            .field(
                "token_ids",
                &format_args!("{}", {
                    let token_ids = self.token_ids();
                    if token_ids.len() <= 20 {
                        format!("{:?}", token_ids)
                    } else {
                        let first_ten = &token_ids[..10];
                        let last_ten = &token_ids[token_ids.len() - 10..];
                        format!("{:?} ... {:?}", first_ten, last_ten)
                    }
                }),
            )
            .field("prefix_offset", &self.prefix_offset)
            .field("read_offset", &self.read_offset)
            .field("token count", &self.token_ids.len())
            .finish()
    }
}

impl Sequence {
    /// Create a new empty sequence
    pub fn new(tokenizer: Arc<dyn TokenizerTrait>) -> Self {
        Self::new_with_options(tokenizer, false)
    }

    /// Create a new empty sequence with skip_special_tokens option
    pub fn new_with_options(tokenizer: Arc<dyn TokenizerTrait>, skip_special_tokens: bool) -> Self {
        Self {
            tokenizer,
            token_ids: Vec::new(),
            prefix_offset: 0,
            read_offset: 0,
            skip_special_tokens,
        }
    }

    /// Create a sequence with initial tokens
    pub fn with_tokens(tokenizer: Arc<dyn TokenizerTrait>, token_ids: Vec<TokenIdType>) -> Self {
        Self::with_tokens_and_options(tokenizer, token_ids, false)
    }

    /// Create a sequence with initial tokens and skip_special_tokens option
    pub fn with_tokens_and_options(
        tokenizer: Arc<dyn TokenizerTrait>,
        token_ids: Vec<TokenIdType>,
        skip_special_tokens: bool,
    ) -> Self {
        let len = token_ids.len();
        Self {
            tokenizer,
            token_ids,
            prefix_offset: 0,
            read_offset: len,
            skip_special_tokens,
        }
    }

    /// Check if the sequence is empty
    pub fn is_empty(&self) -> bool {
        self.token_ids.is_empty()
    }

    /// Get the length of the sequence
    pub fn len(&self) -> usize {
        self.token_ids.len()
    }

    /// Clear the sequence
    pub fn clear(&mut self) {
        self.token_ids.clear();
        self.prefix_offset = 0;
        self.read_offset = 0;
    }

    /// Append text to the sequence by encoding it
    pub fn append_text(&mut self, input: &str) -> Result<()> {
        let encoding = self.tokenizer.encode(input)?;
        self.token_ids.extend(encoding.token_ids());
        Ok(())
    }

    /// Append a single token to the sequence and return any newly decoded text.
    /// Based on the incremental decoding approach used by HuggingFace
    /// text-generation-inference (TGI).
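    ///
    /// The scheme: decode the window `[prefix_offset, read_offset)` to recover the
    /// prefix text that was already emitted, decode `[prefix_offset, ..]` including
    /// the new token, and return only the suffix beyond the prefix. A trailing
    /// replacement character (U+FFFD) indicates an incomplete multi-byte sequence,
    /// so an empty string is returned and decoding resumes on the next token.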
    pub fn append_token(&mut self, token_id: TokenIdType) -> Result<String> {
        // Store the old read offset before adding the new token
        let old_read_offset = self.read_offset;

        self.token_ids.push(token_id);
        self.read_offset = self.token_ids.len();

        // If this is the first token or we're at the beginning, decode everything
        if self.prefix_offset == 0 && old_read_offset == 0 {
            let text = self
                .tokenizer
                .decode(&self.token_ids, self.skip_special_tokens)?;
            if text.ends_with("�") {
                // Incomplete UTF-8 sequence, wait for more tokens
                return Ok(String::new());
            }
            self.prefix_offset = 0;
            return Ok(text);
        }

        // Decode the text up to the previous position
        let prefix_text = self.tokenizer.decode(
            &self.token_ids[self.prefix_offset..old_read_offset],
            self.skip_special_tokens,
        )?;

        // Decode the text including the new token
        let new_text = self.tokenizer.decode(
            &self.token_ids[self.prefix_offset..],
            self.skip_special_tokens,
        )?;

        // Handle multi-byte character boundaries
        let mut prefix_text_len = prefix_text.len();
        while !new_text.is_char_boundary(prefix_text_len) && prefix_text_len > 0 {
            prefix_text_len -= 1;
        }

        if new_text.len() > prefix_text.len() {
            if new_text.ends_with("�") {
                // Incomplete UTF-8 sequence, wait for more tokens
                return Ok(String::new());
            } else {
                // Return the new text portion
                let incremental_text = new_text[prefix_text_len..].replace('�', "");
                self.prefix_offset = old_read_offset;
                return Ok(incremental_text);
            }
        }

        Ok(String::new())
    }

    /// Get a reference to the tokenizer
    pub fn tokenizer(&self) -> &Arc<dyn TokenizerTrait> {
        &self.tokenizer
    }

    /// Get the current token ids
    pub fn token_ids(&self) -> &[TokenIdType] {
        &self.token_ids
    }

    /// Decode the entire sequence to text
    pub fn text(&self) -> Result<String> {
        self.tokenizer
            .decode(&self.token_ids, self.skip_special_tokens)
    }

    /// Get the prefix offset
    pub fn prefix_offset(&self) -> usize {
        self.prefix_offset
    }

    /// Get the read offset
    pub fn read_offset(&self) -> usize {
        self.read_offset
    }

    /// Get whether special tokens are skipped during decoding
    pub fn skip_special_tokens(&self) -> bool {
        self.skip_special_tokens
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tokenizer::mock::MockTokenizer;

    #[test]
    fn test_sequence_new() {
        let tokenizer = Arc::new(MockTokenizer::new());
        let seq = Sequence::new(tokenizer);
        assert!(seq.is_empty());
        assert_eq!(seq.len(), 0);
    }

    #[test]
    fn test_sequence_append_text() {
        let tokenizer = Arc::new(MockTokenizer::new());
        let mut seq = Sequence::new(tokenizer);

        seq.append_text("Hello").unwrap();
        assert!(!seq.is_empty());

        let text = seq.text().unwrap();
        assert_eq!(text, "Hello");
    }

    #[test]
    fn test_sequence_append_token() {
        let tokenizer = Arc::new(MockTokenizer::new());
        let mut seq = Sequence::new(tokenizer.clone());

        // Start with an empty sequence and append token 1 ("Hello")
        let text1 = seq.append_token(1).unwrap();
        assert_eq!(text1, "Hello");

        // Now append token 2 ("world")
        // The mock tokenizer will decode [1, 2] as "Hello world" (with a space)
        let text2 = seq.append_token(2).unwrap();
        // The incremental text should be " world" (with the space that the mock tokenizer adds)
        assert_eq!(text2, " world");

        assert_eq!(seq.text().unwrap(), "Hello world");
    }
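
    // A sketch exercising the `with_tokens` constructor; it assumes the same
    // MockTokenizer mapping used above (1 -> "Hello", 2 -> "world").
    #[test]
    fn test_sequence_with_tokens() {
        let tokenizer = Arc::new(MockTokenizer::new());
        let seq = Sequence::with_tokens(tokenizer, vec![1, 2]);

        // A seeded sequence starts fully "read": read_offset points past the end.
        assert_eq!(seq.len(), 2);
        assert_eq!(seq.prefix_offset(), 0);
        assert_eq!(seq.read_offset(), 2);
        assert_eq!(seq.text().unwrap(), "Hello world");
    }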

    #[test]
    fn test_sequence_clear() {
        let tokenizer = Arc::new(MockTokenizer::new());
        let mut seq = Sequence::new(tokenizer);

        seq.append_text("Hello world").unwrap();
        assert!(!seq.is_empty());

        seq.clear();
        assert!(seq.is_empty());
        assert_eq!(seq.len(), 0);
        assert_eq!(seq.prefix_offset(), 0);
        assert_eq!(seq.read_offset(), 0);
    }
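
    // Checks the skip_special_tokens flag through the public constructors only;
    // no assumptions are made about which token ids the mock treats as special.
    #[test]
    fn test_sequence_skip_special_tokens_flag() {
        let tokenizer = Arc::new(MockTokenizer::new());

        let default_seq = Sequence::new(tokenizer.clone());
        assert!(!default_seq.skip_special_tokens());

        let skipping_seq = Sequence::new_with_options(tokenizer, true);
        assert!(skipping_seq.skip_special_tokens());
    }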

    #[test]
    fn test_sequence_debug() {
        let tokenizer = Arc::new(MockTokenizer::new());
        let mut seq = Sequence::new(tokenizer);

        seq.append_text("Test").unwrap();
        let debug_str = format!("{:?}", seq);
        assert!(debug_str.contains("Sequence"));
        assert!(debug_str.contains("token count"));
    }
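
    // A sketch of resuming incremental decoding on a seeded sequence, again
    // assuming the mock decodes [1, 2] as "Hello world".
    #[test]
    fn test_sequence_resume_from_tokens() {
        let tokenizer = Arc::new(MockTokenizer::new());
        let mut seq = Sequence::with_tokens(tokenizer, vec![1]);

        // Only the text contributed by the new token should come back.
        let new_text = seq.append_token(2).unwrap();
        assert_eq!(new_text, " world");
        assert_eq!(seq.text().unwrap(), "Hello world");
    }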
}