# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Tokenization utilities."""


import torch


from megatron.core import parallel_state
from megatron.training import get_args, get_tokenizer
from .communication import broadcast_int_list, broadcast_tensor


def detokenize_generations(tokens_gpu_tensor,
                           lengths_gpu_tensor,
                           detokenize_segments):
    """Detokenize the generated tokens."""

    tokenizer = get_tokenizer()
    prompts_plus_generations = []
    prompts_plus_generations_segments = []

    tokens = tokens_gpu_tensor.cpu().numpy().tolist()
    lengths = lengths_gpu_tensor.cpu().numpy().tolist()
    for sequence_tokens, length in zip(tokens, lengths):
        sequence_tokens = sequence_tokens[:length]
        detok_str = tokenizer.detokenize(sequence_tokens)
        prompts_plus_generations.append(detok_str)
        if detokenize_segments:
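            # Segment detok_str into per-token substrings. Each tokenizer
            # offset is the character index where a token's text begins, so
            # consecutive offsets delimit one segment; e.g. (hypothetical
            # values) detok_str = 'Hello world' with offsets [0, 5] yields
            # segments ['Hello', ' world'].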
            try:
                offsets = tokenizer.offsets(sequence_tokens, detok_str)
                words = [
                    detok_str[start:end]
                    for start, end in zip(offsets, offsets[1:] + [len(detok_str)])
                ]
            except NotImplementedError:
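                # Fallback for byte-level BPE tokenizers that do not implement
                # offsets(): map each token id back to its BPE string, then
                # undo the GPT-2 style byte encoding to recover readable text.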
                words = []
                for token in sequence_tokens:
                    word = tokenizer.tokenizer.decoder[token]
                    word = bytearray([tokenizer.tokenizer.byte_decoder[c] for c in word]).decode(
                        "utf-8", errors="replace"
                    )
                    words.append(word)

            prompts_plus_generations_segments.append(words)

    return tokens, prompts_plus_generations, prompts_plus_generations_segments
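
# A hedged usage sketch (not part of the original file; assumes Megatron has
# been initialized so get_tokenizer() works). tokens_gpu_tensor is
# [batch, seq] and lengths_gpu_tensor is [batch], e.g. with hypothetical ids:
#
#     tokens = torch.tensor([[15496, 995, 0, 0]], device='cuda')
#     lengths = torch.tensor([2], device='cuda')  # only first 2 ids are real
#     _, texts, segments = detokenize_generations(tokens, lengths, True)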


def tokenize_prompts(prompts=None, tokens_to_generate=None,
                     add_BOS=None, rank=0, data_parallel=False):
    """Tokenize prompts and make them avaiable on all ranks.

    Args:
        data_parallel (bool): Broadcast tokens across a single data parallel model replica.
    """

    # On all ranks, set these to None so we can pass them to functions.
    sizes_list = None
    prompts_tokens_cuda_long_tensor = None
    prompts_length_cuda_long_tensor = None

    # On the specified rank, build the above.
    src_rank = torch.distributed.get_rank()
    if data_parallel:
        src_rank = parallel_state.get_data_parallel_src_rank()

    if src_rank == rank:
        assert prompts is not None
        assert tokens_to_generate is not None
        # Tensors of padded tokens and their unpadded lengths.
        prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor = \
            _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS)
        # We need the sizes of these tensors for the broadcast.
        sizes_list = [prompts_tokens_cuda_long_tensor.size(0), # Batch size
                      prompts_tokens_cuda_long_tensor.size(1)] # Sequence length

    # First, broadcast the sizes: non-source ranks need the tensor shapes
    # before they can allocate buffers to receive the broadcast.
    sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=rank, data_parallel=data_parallel)

    # Now that we have the sizes, we can broadcast the tokens
    # and length tensors.
    sizes = sizes_tensor.tolist()
    prompts_tokens_cuda_long_tensor = broadcast_tensor(
        sizes, torch.int64, tensor=prompts_tokens_cuda_long_tensor, rank=rank, data_parallel=data_parallel)
    prompts_length_cuda_long_tensor = broadcast_tensor(
        sizes[0], torch.int64, tensor=prompts_length_cuda_long_tensor,
        rank=rank, data_parallel=data_parallel)

    return prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor
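
# A hedged usage sketch (not part of the original file): under an initialized
# torch.distributed process group, only the source rank needs the real
# prompts; the other ranks pass None and still receive the broadcast tensors.
#
#     if torch.distributed.get_rank() == 0:
#         tokens, lengths = tokenize_prompts(
#             prompts=['Hello'], tokens_to_generate=8, add_BOS=False, rank=0)
#     else:
#         tokens, lengths = tokenize_prompts(rank=0)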


def _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS):
    """Given a set of prompts and number of tokens to generate:
        - tokenize prompts
        - set the sequence length to be the maximum prompt length
          plus the number of tokens we would like to generate
        - pad all the sequences to this length so we can convert them
          into a 2D tensor.
    """

    # Tokenize all the prompts.
    tokenizer = get_tokenizer()
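    # Different tokenizer classes expose the end-of-document id under
    # different attribute names; whichever is found is also reused below
    # as the BOS token and as the padding token.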
    if hasattr(tokenizer, 'eod'):
        eod_token = tokenizer.eod
    elif hasattr(tokenizer, 'eos_id'):
        eod_token = tokenizer.eos_id
    else:
        raise AttributeError('No eod token found in Tokenizer')
    if add_BOS:
        prompts_tokens = [[eod_token] + tokenizer.tokenize(prompt)
                          for prompt in prompts]
    else:
        prompts_tokens = [tokenizer.tokenize(prompt) for prompt in prompts]

    # Now we have a list of lists of tokens where each list has a
    # different size. We want to extend this list to:
    #   - incorporate the tokens that need to be generated
    #   - make all the sequences equal length.
    # Get the prompts length.
    prompts_length = [len(prompt_tokens) for prompt_tokens in prompts_tokens]
    # Get the max prompts length.
    max_prompt_len = max(prompts_length)
    # Number of tokens in each sample of the batch.
    samples_length = max_prompt_len + tokens_to_generate
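    # e.g. (hypothetical) prompt lengths [3, 5] with tokens_to_generate=4
    # give samples_length = 5 + 4 = 9, so every row is padded to 9 below.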
    # Now update the list of list to be of the same size: samples_length.
    for prompt_tokens, prompt_length in zip(prompts_tokens, prompts_length):
        padding_size = samples_length - prompt_length
        prompt_tokens.extend([eod_token] * padding_size)

    # Now that we are in a structured format, we can convert to tensors.
    prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.long, device='cuda')
    prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.long, device='cuda')

    return prompts_tokens_tensor, prompts_length_tensor