# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Tokenization utilities."""


import torch


from megatron.core import parallel_state
from megatron.training import get_args, get_tokenizer
from .communication import broadcast_int_list, broadcast_tensor


def detokenize_generations(tokens_gpu_tensor,
                           lengths_gpu_tensor,
                           detokenize_segments):
    """Detokenize the generated tokens."""

    tokenizer = get_tokenizer()
    prompts_plus_generations = []
    prompts_plus_generations_segments = []

    tokens = tokens_gpu_tensor.cpu().numpy().tolist()
    lengths = lengths_gpu_tensor.cpu().numpy().tolist()

    for sequence_tokens, length in zip(tokens, lengths):
        sequence_tokens = sequence_tokens[:length]
        detok_str = tokenizer.detokenize(sequence_tokens)
        prompts_plus_generations.append(detok_str)
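        # Optionally split the detokenized string into per-token segments.
        # Prefer the tokenizer's offsets() API; for tokenizers that do not
        # implement it, fall back to decoding each token through the
        # underlying decoder/byte_decoder tables.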
        if detokenize_segments:
            try:
                offsets = tokenizer.offsets(sequence_tokens, detok_str)
                words = [
                    detok_str[start:end]
                    for start, end in zip(offsets, offsets[1:] + [len(detok_str)])
                ]
            except NotImplementedError:
                words = []
                for token in sequence_tokens:
                    word = tokenizer.tokenizer.decoder[token]
                    word = bytearray([tokenizer.tokenizer.byte_decoder[c] for c in word]).decode(
                        "utf-8", errors="replace"
                    )
                    words.append(word)

            prompts_plus_generations_segments.append(words)

    return tokens, prompts_plus_generations, prompts_plus_generations_segments


def tokenize_prompts(prompts=None, tokens_to_generate=None,
                     add_BOS=None, rank=0, data_parallel=False):
    """Tokenize prompts and make them avaiable on all ranks.

    Args:
        data_parallel (bool): Broadcast tokens across a single data parallel model replica.
    """

    # On all ranks, set these to None so we can pass them to the broadcast functions.
    sizes_list = None
    prompts_tokens_cuda_long_tensor = None
    prompts_length_cuda_long_tensor = None

    # On the specified rank, build the above.
    src_rank = torch.distributed.get_rank()
    if data_parallel:
        src_rank = parallel_state.get_data_parallel_src_rank()

    if src_rank == rank:
        assert prompts is not None
        assert tokens_to_generate is not None
        # Tensor of tokens padded and their unpadded length.
        prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor = \
            _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS)
        # We need the sizes of these tensors for the broadcast.
        sizes_list = [prompts_tokens_cuda_long_tensor.size(0), # Batch size
                      prompts_tokens_cuda_long_tensor.size(1)] # Sequence length

    # First, broadcast the sizes so receiving ranks can allocate the tensors.
    sizes_tensor = broadcast_int_list(2, int_list=sizes_list, rank=rank, data_parallel=data_parallel)

    # Now that we have the sizes, we can broadcast the tokens
    # and length tensors.
    sizes = sizes_tensor.tolist()
    prompts_tokens_cuda_long_tensor = broadcast_tensor(
        sizes, torch.int64, tensor=prompts_tokens_cuda_long_tensor, rank=rank, data_parallel=data_parallel)
    prompts_length_cuda_long_tensor = broadcast_tensor(
        sizes[0], torch.int64, tensor=prompts_length_cuda_long_tensor,
        rank=rank, data_parallel=data_parallel)

    return prompts_tokens_cuda_long_tensor, prompts_length_cuda_long_tensor


def _tokenize_prompts_and_batch(prompts, tokens_to_generate, add_BOS):
    """Given a set of prompts and number of tokens to generate:
        - tokenize prompts
        - set the sequence length to be the max of length of prompts
          plus the number of tokens we would like to generate
        - pad all the sequences to this length so we can convert them
          into a 2D tensor.
    """

    # Tokenize all the prompts.
    tokenizer = get_tokenizer()
    if hasattr(tokenizer, 'eod'):
        eod_token = tokenizer.eod
    elif hasattr(tokenizer, 'eos_id'):
        eod_token = tokenizer.eos_id
    else:
        raise AttributeError('No eod token found in Tokenizer')
    if add_BOS:
        prompts_tokens = [[eod_token] + tokenizer.tokenize(prompt)
                          for prompt in prompts]
    else:
        prompts_tokens = [tokenizer.tokenize(prompt) for prompt in prompts]

    # Now we have a list of lists of tokens, where each inner list has a
    # different size. We want to extend each list to:
    #   - incorporate the tokens that need to be generated
    #   - make all the sequences equal length.
    # Get the prompt lengths.
    prompts_length = [len(prompt_tokens) for prompt_tokens in prompts_tokens]
    # Get the max prompts length.
    max_prompt_len = max(prompts_length)
    # Number of tokens in each sample of the batch.
    samples_length = max_prompt_len + tokens_to_generate
    # Now update the list of list to be of the same size: samples_length.
    for prompt_tokens, prompt_length in zip(prompts_tokens, prompts_length):
        padding_size = samples_length - prompt_length
        prompt_tokens.extend([eod_token] * padding_size)

    # Now that we are in a structured format, we can convert to tensors.
    prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.long, device='cuda')
    prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.long, device='cuda')

    return prompts_tokens_tensor, prompts_length_tensor