# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Inference API."""


import torch

from megatron import mpu
from .communication import broadcast_float_list
from .generation import (
    generate_tokens_probs_and_return_on_first_stage,
    score_and_return_on_first_stage,
    beam_search_and_return_on_first_stage)
from .tokenization import (
    tokenize_prompts,
    detokenize_generations)

def generate_and_post_process(model,
                              prompts=None,
                              tokens_to_generate=0,
                              return_output_log_probs=False,
                              top_k_sampling=0,
                              top_p_sampling=0.0,
                              top_p_decay=0.0,
                              top_p_bound=0.0,
                              temperature=1.0,
                              add_BOS=False,
                              use_eod_token_for_early_termination=True,
                              stop_on_double_eol=False,
                              stop_on_eol=False,
                              random_seed=-1):
    """Run inference and post-process outputs, i.e., detokenize,
mshoeybi's avatar
mshoeybi committed
33
    move to cpu and convert to list."""

    # Main inference.
    tokens, lengths, output_log_probs = generate(
        model,
        prompts=prompts,
        tokens_to_generate=tokens_to_generate,
        return_output_log_probs=return_output_log_probs,
        top_k_sampling=top_k_sampling,
        top_p_sampling=top_p_sampling,
        top_p_decay=top_p_decay,
        top_p_bound=top_p_bound,
        temperature=temperature,
        add_BOS=add_BOS,
        use_eod_token_for_early_termination=use_eod_token_for_early_termination,
        stop_on_double_eol=stop_on_double_eol,
        stop_on_eol=stop_on_eol,
        random_seed=random_seed)

    # Only post-process on first stage.
    if mpu.is_pipeline_first_stage():
        tokens, prompts_plus_generations, prompts_plus_generations_segments = \
            detokenize_generations(tokens, lengths, True)

        if return_output_log_probs:
            output_log_probs = output_log_probs.cpu().numpy().tolist()
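            # Each sequence has one fewer log prob than tokens (no log prob
            # for the first token); trim to the detokenized segment length.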
            for i, (prob, seg) in enumerate(zip(output_log_probs, prompts_plus_generations_segments)):
                output_log_probs[i] = prob[:len(seg)-1]

        return prompts_plus_generations, prompts_plus_generations_segments, \
            output_log_probs, tokens

    return None
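
# Example usage (illustrative sketch, not part of the module): every rank
# calls this function; only the first pipeline stage gets non-None output.
#
#   result = generate_and_post_process(
#       model, prompts=["Hello, world"], tokens_to_generate=32,
#       return_output_log_probs=True, top_k_sampling=1)
#   if result is not None:
#       texts, segments, log_probs, tokens = result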

def generate(model,
             prompts=None,
             tokens_to_generate=0,
             return_output_log_probs=False,
             top_k_sampling=0,
             top_p_sampling=0.0,
             top_p_decay=0.0,
             top_p_bound=0.0,
             temperature=1.0,
             add_BOS=False,
             use_eod_token_for_early_termination=True,
             stop_on_double_eol=False,
             stop_on_eol=False,
             random_seed=-1):
    """Given prompts and input parameters, run inference and return:
       tokens: prompts plus the generated tokens.
       lengths: length of the prompt + generations. Note that we can
           discard tokens in the tokens tensor that are after the
           corresponding length.
       output_log_probs: log probs of the tokens.
    """

    # Make sure input params are available to all ranks.
    values = [tokens_to_generate,
              return_output_log_probs,
              top_k_sampling, top_p_sampling, top_p_decay, top_p_bound,
              temperature, add_BOS, use_eod_token_for_early_termination,
              stop_on_double_eol,
              stop_on_eol,
              random_seed]
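    # Pack all parameters into a single float tensor so one broadcast makes
    # them available on every rank; they are unpacked and cast back below.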
    values_float_tensor = broadcast_float_list(12, float_list=values)
    tokens_to_generate = int(values_float_tensor[0].item())
    return_output_log_probs = bool(values_float_tensor[1].item())
    top_k_sampling = int(values_float_tensor[2].item())
    top_p_sampling = values_float_tensor[3].item()
    top_p_decay = values_float_tensor[4].item()
    top_p_bound = values_float_tensor[5].item()
    temperature = values_float_tensor[6].item()
    add_BOS = bool(values_float_tensor[7].item())
    use_eod_token_for_early_termination = bool(values_float_tensor[8].item())
    stop_on_double_eol = bool(values_float_tensor[9].item())
    stop_on_eol = bool(values_float_tensor[10].item())
    random_seed = int(values_float_tensor[11].item())

    if random_seed != -1:
        torch.random.manual_seed(random_seed)

    # Tokenize prompts and get the batch.
    # Note that these tensors are broadcast to all ranks.
    if torch.distributed.get_rank() == 0:
        assert prompts is not None
    
    context_tokens_tensor, context_length_tensor = tokenize_prompts(
        prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS)

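    # No new tokens requested: just score the provided prompts.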
    if tokens_to_generate == 0:
        return score_and_return_on_first_stage(
            model, context_tokens_tensor, context_length_tensor)
    
    # Main inference function.
    # Note that the outputs are available on the first stage.
    return generate_tokens_probs_and_return_on_first_stage(
        model, context_tokens_tensor, context_length_tensor,
        return_output_log_probs=return_output_log_probs,
        top_k=top_k_sampling,
        top_p=top_p_sampling,
        top_p_decay=top_p_decay,
        top_p_bound=top_p_bound,
        temperature=temperature,
        use_eod_token_for_early_termination=use_eod_token_for_early_termination,
        stop_on_double_eol=stop_on_double_eol,
        stop_on_eol=stop_on_eol)
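
# Example usage (illustrative sketch): generate() must be entered by all
# ranks, but only global rank 0 needs the actual prompt strings; the
# sampling parameters are broadcast internally.
#
#   prompts = ["Megatron is"] if torch.distributed.get_rank() == 0 else None
#   tokens, lengths, output_log_probs = generate(
#       model, prompts=prompts, tokens_to_generate=16, top_p_sampling=0.9)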

def beam_search_and_post_process(model,
                                 prompts=None,
                                 tokens_to_generate=0,
                                 beam_size=0,
                                 add_BOS=False,
                                 stop_token=50256,  # GPT-2 BPE <|endoftext|>
                                 num_return_gen=1,
                                 length_penalty=1):
    """Run beam search and post-process outputs, i.e., detokenize,
    move to cpu and convert to list."""

    # Main inference.
    tokens, scores = beam_search(model,
                                 prompts=prompts,
                                 tokens_to_generate=tokens_to_generate,
                                 beam_size=beam_size,
                                 add_BOS=add_BOS,
                                 stop_token=stop_token,
                                 num_return_gen=num_return_gen,
                                 length_penalty=length_penalty)
    # Only post-process on first stage.
    if mpu.is_pipeline_first_stage():
        lengths = tokens.size(1)*torch.ones(
            beam_size, dtype=torch.int64, device=torch.cuda.current_device())
        tokens, prompts_plus_generations, prompts_plus_generations_segments = \
            detokenize_generations(tokens, lengths, True)
        scores = scores.cpu().numpy().tolist()
        return prompts_plus_generations, prompts_plus_generations_segments, scores

    return None
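
# Example usage (illustrative sketch): as with generate_and_post_process(),
# all ranks call this, and only the first pipeline stage gets results.
#
#   result = beam_search_and_post_process(
#       model, prompts=["Hello"], tokens_to_generate=32, beam_size=4)
#   if result is not None:
#       texts, segments, scores = result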

def beam_search(model, prompts=None, tokens_to_generate=0, beam_size=0,
                add_BOS=False, stop_token=50256, num_return_gen=1,
                length_penalty=1):
    """Run beam search and return tokens and scores on the first stage."""
    # Make sure input params are available to all ranks.
    values = [tokens_to_generate,
              beam_size,
              add_BOS,
              stop_token,
              num_return_gen,
              length_penalty]
    values_float_tensor = broadcast_float_list(6, float_list=values)
    tokens_to_generate = int(values_float_tensor[0].item())
    beam_size = int(values_float_tensor[1].item())
    add_BOS = bool(values_float_tensor[2].item())
    stop_token = int(values_float_tensor[3].item())
    num_return_gen = int(values_float_tensor[4].item())
    length_penalty = values_float_tensor[5].item()

    context_tokens_tensor, context_length_tensor = tokenize_prompts(
        prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS)
    
    return beam_search_and_return_on_first_stage(
        model, context_tokens_tensor, context_length_tensor,
        beam_size, stop_token=stop_token, num_return_gen=num_return_gen,
        length_penalty=length_penalty)