test_repetition_penalty.py
import typing
import unittest

import torch

from sglang.srt.sampling.penaltylib.penalizers.repetition_penalty import (
    BatchedRepetitionPenalizer,
)
from sglang.test.srt.sampling.penaltylib.utils import (
    BaseBatchedPenalizerTest,
    MockSamplingParams,
    Step,
    StepType,
    Subject,
)

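# Penalty factor used by the "enabled" test subject; a value of 1.0 disables the penalty.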
REPETITION_PENALTY = 2.0


class TestBatchedRepetitionPenalizer(BaseBatchedPenalizerTest):
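    """Checks BatchedRepetitionPenalizer's internal penalty tensors and the resulting
    penalized logits across a prompt step and a decode step, for both an enabled (2.0)
    and a disabled (1.0) repetition penalty."""
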
    Penalizer = BatchedRepetitionPenalizer

    def _create_subject(self, repetition_penalty: float) -> Subject:
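        """Build a test subject with one prompt (input) step and one decode (output) step.

        The hard-coded 5-element expectations assume the test harness applies the
        penalizer to all-ones baseline logits over a vocabulary of 5 tokens, so a
        penalized (positive) logit becomes 1 / repetition_penalty.
        """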
        l = 1.0 / repetition_penalty
        return Subject(
            sampling_params=MockSamplingParams(
                repetition_penalty=repetition_penalty,
            ),
            steps=[
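                # Prompt step: input tokens 0, 1 and 2 become penalized, so their
                # cumulated penalties take the configured value and their (positive)
                # logits are divided by it; tokens 3 and 4 are untouched.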
                Step(
                    type=StepType.INPUT,
                    token_ids=[0, 1, 2],
                    expected_tensors={
                        "repetition_penalties": self.tensor(
                            [[repetition_penalty] * self.vocab_size],
                            dtype=torch.float32,
                        ),
                        "cumulated_repetition_penalties": (
                            self.tensor(
                                [[2.0, 2.0, 2.0, 1.0, 1.0]], dtype=torch.float32
                            )
                            if repetition_penalty != 1.0
                            else self.tensor(
                                [[1.0] * self.vocab_size], dtype=torch.float32
                            )
                        ),
                    },
                    expected_logits=(
                        self.tensor([[l, l, l, 1.0, 1.0]], dtype=torch.float32)
                        if repetition_penalty != 1.0
                        else self.tensor([[1.0] * self.vocab_size], dtype=torch.float32)
                    ),
                ),
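                # Decode step: token 3 is generated, extending the penalized set
                # to {0, 1, 2, 3}; only token 4 remains unpenalized.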
                Step(
                    type=StepType.OUTPUT,
                    token_ids=[0, 1, 3],
                    expected_tensors={
                        "repetition_penalties": self.tensor(
                            [[repetition_penalty] * self.vocab_size],
                            dtype=torch.float32,
                        ),
                        "cumulated_repetition_penalties": (
                            self.tensor(
                                [[2.0, 2.0, 2.0, 2.0, 1.0]], dtype=torch.float32
                            )
                            if repetition_penalty != 1.0
                            else self.tensor(
                                [[1.0] * self.vocab_size], dtype=torch.float32
                            )
                        ),
                    },
                    expected_logits=(
                        self.tensor([[l, l, l, l, 1.0]], dtype=torch.float32)
                        if repetition_penalty != 1.0
                        else self.tensor([[1.0] * self.vocab_size], dtype=torch.float32)
                    ),
                ),
            ],
        )

    def create_test_subjects(self) -> typing.List[Subject]:
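        """Register one subject with the penalty enabled (2.0) and one disabled (1.0)."""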
        self.enabled = self._create_subject(repetition_penalty=REPETITION_PENALTY)
        self.disabled = self._create_subject(repetition_penalty=1.0)


if __name__ == "__main__":
    unittest.main()