reward_model.py
from typing import Optional

import torch
import torch.nn as nn

from ..lora import LoRAModule


class RewardModel(LoRAModule):
    """
    Reward model base class.

    Args:
        model (nn.Module): Reward model.
        value_head (nn.Module): Value head to get reward score.
        lora_rank (int): LoRA rank.
        lora_train_bias (str): LoRA bias training mode.
    """

    def __init__(self,
                 model: nn.Module,
                 value_head: Optional[nn.Module] = None,
                 lora_rank: int = 0,
                 lora_train_bias: str = 'none') -> None:
        super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
        self.model = model
        # Inject LoRA adapters into the backbone per lora_rank/lora_train_bias
        # (a no-op when lora_rank == 0).
        self.convert_to_lora()

        if value_head is not None:
            if value_head.out_features != 1:
                raise ValueError("The value head of a reward model must have output dim 1!")
            self.value_head = value_head
        else:
            # Default head assumes a GPT-2-style config that exposes n_embd as the hidden size.
            self.value_head = nn.Linear(model.config.n_embd, 1)

    def forward(self, sequences: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        outputs = self.model(sequences, attention_mask=attention_mask)
        last_hidden_states = outputs['last_hidden_state']
        # Per-token values of shape (B, T, 1); the value at the final position is dropped.
        values = self.value_head(last_hidden_states)[:, :-1]
        value = values.mean(dim=1).squeeze(1)    # average over tokens, shape (B,)
        return value
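

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). It assumes a
# Hugging Face GPT-2 backbone, whose config exposes n_embd and whose forward()
# returns an output indexable by 'last_hidden_state' -- exactly what
# RewardModel expects above. The model/tokenizer ids and the relative-import
# context are assumptions; run this from within the package, not standalone.
if __name__ == '__main__':
    from transformers import GPT2Model, GPT2Tokenizer

    backbone = GPT2Model.from_pretrained('gpt2')
    reward_model = RewardModel(backbone, lora_rank=8)    # LoRA-wrapped backbone

    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    tokenizer.pad_token = tokenizer.eos_token    # GPT-2 has no pad token by default
    batch = tokenizer(['a helpful response', 'a rude response'],
                      return_tensors='pt', padding=True)

    with torch.no_grad():
        rewards = reward_model(batch['input_ids'],
                               attention_mask=batch['attention_mask'])
    print(rewards.shape)    # torch.Size([2]): one scalar reward per sequence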