from __future__ import annotations

import abc
import hashlib
import json
import logging
import os
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, TypeVar

from tqdm import tqdm

from lm_eval import utils


if TYPE_CHECKING:
    from sqlitedict import SqliteDict

    from lm_eval.api.instance import Instance


eval_logger = logging.getLogger(__name__)

T = TypeVar("T", bound="LM")


class LM(abc.ABC):
    def __init__(self) -> None:
        """Defines the interface that should be implemented by all LM subclasses.
        LMs are assumed to take text (strings) as input and yield strings or log probabilities as output
        (inputs/outputs should be tokenization-agnostic.)

        """
        # set rank and world size to a single process, by default.
        self._rank = 0
        self._world_size = 1
        self.cache_hook: CacheHook = CacheHook(None)

    @abc.abstractmethod
    def loglikelihood(self, requests: list[Instance]) -> list[tuple[float, bool]]:
        """Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
        LM calls whenever possible.

        :param requests: list[Instance]
            A list of Instance objects, with property `args` which returns a tuple (context, continuation).
            `context: str`
                Context string. Implementations of LM must be able to handle an
                empty context string.
            `continuation: str`
                The continuation over which log likelihood will be calculated. If
                there is a word boundary, the space should be in the continuation.
                For example, context="hello" continuation=" world" is correct.

        :return: list[tuple[float, bool]]
            A list of pairs (logprob, isgreedy)
            `logprob: float`
                The log probability of `continuation`.
            `isgreedy`:
                Whether `continuation` would be generated by greedy sampling from `context`.
        """
        pass

    @abc.abstractmethod
    def loglikelihood_rolling(self, requests: list[Instance]) -> list[float]:
        """Compute full log-likelihood of a string, with no truncation, for perplexity computation
        - We will use the full max context length of the model.
        - For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
        the max context length.
        - IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
          which may simply concatenate multiple documents together.
        - IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
          multiple chunks, the last input will still have full-sized context.
          Example:
            Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
            Prefix: BOS/EOS
            Max context length: 4
            Resulting input/prediction pairs:

                INPUT:  BOS   0   1   2
                PRED:     0   1   2   3

                INPUT:    3   4   5   6
                PRED:     4   5   6   7

                INPUT:    5   6   7   8
                PRED:             8   9

          Observe that:
            1. Each token is predicted exactly once
            2. For the last pair, we provide the full context, but only score the last two tokens

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context,).
            string: str
                String for which we are computing overall loglikelihood
        :return: list[float]
            A list of log probabilities, one per request
            logprob: float
                The log probability of `context` conditioned on the BOS/EOS token.
                Can also be overridden for custom cases by `prefix_token_id`.
        """
        pass

    # TODO: Add an optional max length
    @abc.abstractmethod
    def generate_until(self, requests: list[Instance]) -> list[str]:
        """Generate greedily until a stopping sequence

        :param requests: list[Instance]
            A list of Instance objects with property `args` which returns a tuple (context, gen_kwargs).
            context: str
                Context string
            gen_kwargs: dict
                A dictionary of keyword arguments to pass to the generation function e.g. top_k, until, etc.
        :return: list[str]
            A list of model generated continuations.
            continuation: str
                The generated continuation.
        """
        pass

    def apply_chat_template(
        self, chat_history: list[dict], add_generation_prompt=True
    ) -> str:
        """
        Defines how to transform few-shot examples provided as chat history into a format that can be used as input to the LM.

        :param chat_history: list[dict[str, str]]
            A list of dictionaries with keys 'role' and 'content'.
            Values are strings representing the role name and the content of the message, respectively.
        :param add_generation_prompt: bool
            Whether to append an assistant generation prompt (e.g. <|assistant|>) to the end of the chat history. Set to False when prefilling an assistant message.
        :return: str
            A string representing the chat history in a format that can be used as input to the LM.
        """
        raise NotImplementedError(
            "To use this model with chat templates, please implement the 'apply_chat_template' method for your model type."
        )
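    # A minimal sketch of the expected input, assuming the standard role/content
    # message format described above (contents are illustrative only):
    #
    #   chat_history = [
    #       {"role": "user", "content": "What is the capital of France?"},
    #       {"role": "assistant", "content": "Paris."},
    #   ]
    #
    # Subclasses typically render such a history into a single prompt string,
    # e.g. by delegating to their tokenizer's chat template.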

    @classmethod
    def create_from_arg_string(
        cls: type[T], arg_string: str, additional_config: dict | None = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument string and additional config.

        Parameters:
        - arg_string: A string containing arguments in the format key1=value1,key2=value2.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """
        additional_config = {} if additional_config is None else additional_config
        args = utils.simple_parse_args_string(arg_string)
        args2 = {k: v for k, v in additional_config.items() if v is not None}
        return cls(**args, **args2)
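    # Usage sketch (hypothetical subclass and arguments): the arg string is parsed
    # by utils.simple_parse_args_string, so
    #
    #   MyLM.create_from_arg_string("pretrained=gpt2,batch_size=8", {"device": "cuda:0"})
    #
    # is roughly equivalent to MyLM(pretrained="gpt2", batch_size=8, device="cuda:0"),
    # assuming MyLM accepts those keyword arguments.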

    @classmethod
    def create_from_arg_obj(
        cls: type[T], arg_dict: dict, additional_config: dict | None = None
    ) -> T:
        """
        Creates an instance of the LM class using the given argument dictionary and additional config.

        Parameters:
        - arg_dict: A dictionary of keyword arguments used to construct the LM.
        - additional_config: Optional dictionary containing additional configuration parameters.

        Returns:
        - Instance of the LM class.
        """

        additional_config = {
            k: v for k, v in (additional_config or {}).items() if v is not None
        }

        return cls(**arg_dict, **additional_config)

    @property
    def rank(self) -> int:
        """Returns the rank of the current process in a distributed setting."""
        # used in the case of parallelism. Hardcoded to
        # ensure no errors arise using API models which do
        # not support multi-device parallelism nor expect it.
        return self._rank

    @property
    def world_size(self) -> int:
        """Returns the total number of processes in a distributed setting."""
        # used in the case of parallelism. Hardcoded to
        # ensure no errors arise using API models which do
        # not support multi-device parallelism nor expect it.
        return self._world_size

    @property
    def tokenizer_name(self) -> str:
        """Must be defined for LM subclasses which implement Chat Templating.
        Should return the name of the tokenizer or chat template used.
        Used only to properly fingerprint caches when requests are cached with `--cache_requests`; otherwise unused.
        """
        raise NotImplementedError(
            "To use this model with chat templates, please implement the 'tokenizer_name' property."
        )

    def chat_template(self, chat_template: bool | str = False) -> str | None:
        """Returns the chat template structure for user/assistant messages if a template is provided.
        This method is intended to be overridden in a subclass to define a specific chat template format.
        For models that do not support chat templates, this base implementation returns an empty string.
        """

        return ""

    def set_cache_hook(self, cache_hook: CacheHook) -> None:
        """Sets the cache hook for the LM, which is used to cache responses from the LM."""
        self.cache_hook = cache_hook


### SQLite-based caching of LM responses
def hash_args(attr: str, args: Iterable[Any]) -> str:
    dat = json.dumps([attr] + list(args))
    return hashlib.sha256(dat.encode("utf-8")).hexdigest()
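# For example, a cached "loglikelihood" call for ("hello", " world") is keyed by
# hash_args("loglikelihood", ("hello", " world")), i.e. the SHA-256 hex digest of
# json.dumps(["loglikelihood", "hello", " world"]).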


class CacheHook:
    def __init__(self, cachinglm: CachingLM | None) -> None:
        """CacheHook is used to cache responses from the LM."""
        if cachinglm is None:
            self.dbdict: SqliteDict | None = None
            return

        self.dbdict = cachinglm.dbdict

    def add_partial(self, attr: str, req: Iterable[Any], res: Any) -> None:
        """Adds a partial result to the cache."""
        if self.dbdict is None:
            return
        hsh = hash_args(attr, req)
        self.dbdict[hsh] = res


class CachingLM:
    def __init__(self, lm: LM, cache_db: str) -> None:
        """LM wrapper that returns cached results if they exist, and uses the underlying LM if not.

        :param lm: LM
            Underlying LM
        :param cache_db: str
            Path to cache db
        """
        from sqlitedict import SqliteDict

        self.lm: LM = lm
        self.cache_db: str = cache_db
        if os.path.dirname(cache_db):
            os.makedirs(os.path.dirname(cache_db), exist_ok=True)
        self.dbdict = SqliteDict(cache_db, autocommit=True)

        # add hook to lm
        lm.set_cache_hook(self.get_cache_hook())

    def __getattr__(self, attr: str) -> Any:
        lm_attr = getattr(self.lm, attr)
        if attr not in ["loglikelihood", "loglikelihood_rolling", "generate_until"]:
            eval_logger.debug(f"Passing through attribute '{attr}' to underlying LM")
            return lm_attr

        def _fn(requests: list[Instance]) -> list[Any]:
            res = []
            remaining_reqs = []
            warned = False
            # figure out which ones are cached and which ones are new
            eval_logger.info(
                f"Loading '{attr}' responses from cache '{self.cache_db}' where possible..."
            )
            for req in tqdm(requests, desc="Checking cached requests"):
                hsh = hash_args(attr, req.args)
                if attr == "generate_until" and req.args[1].get("do_sample", False):
                    # when we are doing non-greedy generation, don't use the cache
                    # (else every "randomly sampled" generation would be identical for repeats > 1).
                    if not warned:
                        eval_logger.warning(
                            f"Arguments to lm.generate_until() '{req.args[1]}' include non-deterministic sampling. Caching will not be performed for such requests."
                        )
                        warned = True
                    res.append(None)
                    remaining_reqs.append(req)
                elif hsh in self.dbdict:
                    ob = self.dbdict[hsh]

                    assert ob is not None

                    res.append(ob)
                else:
                    res.append(None)
                    remaining_reqs.append(req)
            eval_logger.info(
                f"Cached requests: {len(requests) - len(remaining_reqs)}, Requests remaining: {len(remaining_reqs)}"
            )

            rem_res = getattr(self.lm, attr)(remaining_reqs) if remaining_reqs else []

            # stick the new ones back into the list and also cache any of the new ones
            resptr = 0
            for req, r in zip(remaining_reqs, rem_res):
                while res[resptr] is not None:
                    resptr += 1

                res[resptr] = r

                # caching
                hsh = hash_args(attr, req.args)
                self.dbdict[hsh] = r
            self.dbdict.commit()

            return res

        return _fn

    def get_cache_hook(self) -> CacheHook:
        return CacheHook(self)
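
# Usage sketch (illustrative cache path; assumes `base_lm` is an instantiated LM
# subclass and `requests` is a list of Instance objects):
#
#   cached_lm = CachingLM(base_lm, "lm_cache/my_model.db")
#   results = cached_lm.loglikelihood(requests)
#
# Repeated calls with identical request arguments are served from the SQLite
# cache; only uncached requests are forwarded to the underlying model.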


class TemplateLM(LM):
    """
    A class acting as intermediary between the LM base class
    and boilerplate often included in other LM subclasses.
    """

    tokenizer = None

    @property
    @abc.abstractmethod
    def eot_token_id(self) -> int:
        """Returns the token ID for the end-of-text token (e.g., EOS)."""
        pass

    @property
    def prefix_token_id(self) -> int:
        """Returns the token ID for the prefix token (e.g., BOS or EOS)."""
        return self.eot_token_id

    @abc.abstractmethod
    def tok_encode(self, string: str, **kwargs) -> list[int]:
        """
        Tokenize a string using the model's tokenizer and return a list of token IDs.
        """
        pass

    @abc.abstractmethod
    def _loglikelihood_tokens(
        self, requests: list[tuple[tuple[str, str], list[int], list[int]]], **kwargs
    ) -> list[tuple[float, bool]]:
        """Called by loglikelihood to compute log likelihoods for a list of requests.

        Args:
            requests: list[tuple[tuple[str, str], list[int], list[int]]]
                A list of tuples where each tuple contains:
                - (context, continuation) as a tuple of strings
                - context_enc: list of token IDs for the context
                - continuation_enc: list of token IDs for the continuation
        Returns:
            list[tuple[float, bool]]
                A list of tuples where each tuple contains:
                - logprob: float, the (summed) log probability of the continuation given the context
                - isgreedy: bool, whether the continuation would be generated by greedy sampling from the context

        See LM.loglikelihood for more details.
        """
        pass
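    # Shape sketch for a single request tuple (token IDs are illustrative only):
    #   (("Question: 2 + 2 =", " 4"), [23433, 25, 362, ...], [604])
    # i.e. ((context, continuation), context_enc, continuation_enc).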

    def _encode_pair(
        self, context: str, continuation: str
    ) -> tuple[list[int], list[int]]:
        """Encodes a pair of context and continuation strings into token IDs.

        We encode using encode(context+continuation) and then split into context and continuation.
        """
        import transformers

        n_spaces = len(context) - len(context.rstrip())
        if n_spaces > 0:
            continuation = context[-n_spaces:] + continuation
            context = context[:-n_spaces]

        model_class = getattr(self, "AUTO_MODEL_CLASS", None)

        if model_class == transformers.AutoModelForSeq2SeqLM:
            context_enc = self.tok_encode(context)
            continuation_enc = self.tok_encode(continuation, add_special_tokens=False)
        else:
            whole_enc = self.tok_encode(context + continuation)
            context_enc = self.tok_encode(context)

            context_enc_len = len(context_enc)
            continuation_enc = whole_enc[context_enc_len:]

        return context_enc, continuation_enc
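    # For example, _encode_pair("hello ", "world") is handled as ("hello", " world"):
    # trailing whitespace is moved from the context onto the continuation before
    # tokenization, so the whitespace is tokenized together with the continuation
    # rather than dangling at the end of the context.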

    def loglikelihood(
        self, requests: list[Instance], disable_tqdm: bool = False
    ) -> list[tuple[float, bool]]:
        """Compute log-likelihood of generating a continuation from a context.

        This calls `_loglikelihood_tokens` to compute the log likelihoods for a list of requests, after encoding.
        """
        new_reqs = []
        for context, continuation in [req.args for req in requests]:
            if context == "":
                # BOS or EOS as context
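                # i.e. the continuation is scored as P(continuation | prefix_token_id),
                # conditioned only on the BOS/EOS prefix token.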
                context_enc, continuation_enc = (
                    [self.prefix_token_id],
                    self.tok_encode(continuation),
                )
            else:
                context_enc, continuation_enc = self._encode_pair(context, continuation)

            new_reqs.append(((context, continuation), context_enc, continuation_enc))

        return self._loglikelihood_tokens(new_reqs, disable_tqdm=disable_tqdm)

    @abc.abstractmethod
    def loglikelihood_rolling(
        self, requests, disable_tqdm: bool = False
    ) -> list[float]:
        """Compute rolling log-likelihood of a sequence using non-overlapping windows.

        See LM.loglikelihood_rolling for more details.
        """
        pass

    @abc.abstractmethod
    def generate_until(
        self, requests: list[Instance], disable_tqdm: bool = False
    ) -> list[str]:
        """Generate until a stopping sequence.

        Args:
            requests: list[Instance]
                A list of Instance objects with property `args` which returns a tuple (context, gen_kwargs).
                context: str
                    Context string
                gen_kwargs: dict
                    A dictionary of keyword arguments to pass to the generation function e.g. top_k, until, etc.
        Returns:
            list[continuation, ...]
                A list of model generated continuations.
                continuation: str
                    The generated continuation.

        See LM.generate_until for more details.
        """
        pass

    def chat_template(self, chat_template: bool | str = False) -> str | None:
        """
        Assumes the tokenizer has a chat_template attribute (self.tokenizer.chat_template: dict | str).
        Set and get the appropriate chat template for the model.
        This method sets the tokenizer's chat_template and returns the template string for reproducibility.

        The template selection logic is adapted from the Transformers library's `apply_chat_template`
        method in the Tokenizer class. The original implementation can be found at:
        https://github.com/huggingface/transformers/blob/fc35907f95459d7a6c5281dfadd680b6f7b620e3/src/transformers/tokenization_utils_base.py#L1687

        This method ensures that the right template is chosen based on the following:
        0. If the model has no 'tokenizer' attribute: assumes that there is only a single possible chat template, handled on the model provider side internally. Returns the empty string.
        1. If the model's tokenizer has multiple templates:
            a. Use the specified template if it exists in the dictionary.
            b. Use the default template from the list if no specific template is provided.
            c. Raise an error if no default template exists and no specific template is provided.
        2. If the model's tokenizer has a single template or no template:
            a. Use the tokenizer's chat template if available.
            b. Fall back to the default chat template if no tokenizer chat template exists.

        Args:
            chat_template (Union[bool, str]): Specifies the chat template to use.
                - If False or None, no template is applied.
                - If True, the default or only available template is used.
                - If a string, the template with the matching name is used.

        Returns:
            Optional[str]: The selected chat template, or None if no template is applied.
        """
        if self.tokenizer is None:
            return ""

        if chat_template is False or chat_template is None:
            eval_logger.warning(
                "model.chat_template was called with the chat_template set to False or None. "
                "Therefore no chat template will be applied. Make sure this is an intended behavior."
            )
            return None

        # Convert boolean chat_template to None to ensure compatibility with the adapted logic
        if isinstance(chat_template, bool):
            chat_template = None
        using_default_template = False

        # First, handle the cases when the model has a dict of multiple templates
        try:
            template = (
                self.tokenizer.chat_template or self.tokenizer.default_chat_template
            )
        except AttributeError:
            return None

        if isinstance(template, dict):
            using_default_dict = self.tokenizer.chat_template is None

            if chat_template is not None:
                if chat_template in template:
                    selected_template = template[chat_template]
                    if using_default_dict:
                        using_default_template = True
                else:
                    raise ValueError(
                        f"The specified chat template '{chat_template}' is not available. "
                        f"Available template names are {sorted(template.keys())}."
                    )
            else:
                # If user didn't pass a chat template, use the default template from the dict
                if "default" in template:
                    selected_template = template["default"]
                    using_default_template = True
                else:
                    raise ValueError(
                        "This model has multiple chat templates with no default specified! Please either pass a chat "
                        "template or the name of the template you wish to use to the `chat_template` argument. Available "
                        f"template names are {sorted(template.keys())}."
                    )

        # Cases when the model has a single template or no template
        else:
            # priority: `chat_template` argument > `tokenizer.chat_template` > `tokenizer.default_chat_template
            if isinstance(chat_template, str):
                eval_logger.warning(
                    "Chat template name provided, but the tokenizer's chat template is not a dictionary. "
                    "Using the tokenizer's chat template or the default template instead."
                )
            if self.tokenizer.chat_template is not None:
                selected_template = self.tokenizer.chat_template
            else:
                selected_template = self.tokenizer.default_chat_template
                using_default_template = True

        if using_default_template:
            eval_logger.warning(
                "No chat template is set for this tokenizer, falling back to a default class-level template. This is "
                "very error-prone, because models are often trained with templates different from the class default! "
                "Default chat templates are a legacy feature and will be removed in Transformers v4.43, at which "
                "point any code depending on them will stop working. We recommend setting a valid chat template before "
                "then to ensure that this model continues working without issues."
            )

        return selected_template