# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

"""Inference API."""


import torch

from megatron import mpu
from .communication import broadcast_float_list
from .generation import (
    generate_tokens_probs_and_return_on_first_stage,
    score_and_return_on_first_stage,
    beam_search_and_return_on_first_stage)
from .tokenization import (
    tokenize_prompts,
    detokenize_generations)

def generate_and_post_process(model,
                              prompts=None,
                              tokens_to_generate=0,
                              return_output_log_probs=False,
                              top_k_sampling=0,
                              top_p_sampling=0.0,
                              top_p_decay=0.0,
                              top_p_bound=0.0,
                              temperature=1.0,
                              add_BOS=False,
                              use_eod_token_for_early_termination=True,
                              stop_on_double_eol=False,
                              stop_on_eol=False,
                              prevent_newline_after_colon=False,
                              random_seed=-1):
    """Run inference and post-process the outputs, i.e., detokenize,
    move to CPU, and convert to lists."""

    # Main inference.
    tokens, lengths, output_log_probs = generate(
        model,
        prompts=prompts,
        tokens_to_generate=tokens_to_generate,
        return_output_log_probs=return_output_log_probs,
        top_k_sampling=top_k_sampling,
        top_p_sampling=top_p_sampling,
        top_p_decay=top_p_decay,
        top_p_bound=top_p_bound,
        temperature=temperature,
        add_BOS=add_BOS,
        use_eod_token_for_early_termination=use_eod_token_for_early_termination,
        stop_on_double_eol=stop_on_double_eol,
        stop_on_eol=stop_on_eol,
        prevent_newline_after_colon=prevent_newline_after_colon,
        random_seed=random_seed)

    # Only post-process on first stage.
    if mpu.is_pipeline_first_stage():
        tokens, prompts_plus_generations, prompts_plus_generations_segments = \
            detokenize_generations(tokens, lengths, True)

        if return_output_log_probs:
            output_log_probs = output_log_probs.cpu().numpy().tolist()
            # Trim each row to len(seg) - 1 entries: a sequence of len(seg)
            # tokens has len(seg) - 1 next-token predictions.
            for i, (prob, seg) in enumerate(
                    zip(output_log_probs, prompts_plus_generations_segments)):
                output_log_probs[i] = prob[:len(seg) - 1]

        return prompts_plus_generations, prompts_plus_generations_segments, \
            output_log_probs, tokens

    return None
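
# A minimal usage sketch (illustrative only; it assumes `model` is an
# already-initialized Megatron model running inside a distributed job,
# and the prompt and sampling settings are made up):
#
#   result = generate_and_post_process(model,
#                                      prompts=["Hello, my name is"],
#                                      tokens_to_generate=32,
#                                      return_output_log_probs=True,
#                                      top_k_sampling=1)
#   if result is not None:  # non-None only on the first pipeline stage
#       prompts_plus_generations, segments, log_probs, tokens = result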

def generate(model,
             prompts=None,
             tokens_to_generate=0,
             return_output_log_probs=False,
             top_k_sampling=0,
             top_p_sampling=0.0,
             top_p_decay=0.0,
             top_p_bound=0.0,
             temperature=1.0,
             add_BOS=False,
             use_eod_token_for_early_termination=True,
             stop_on_double_eol=False,
             stop_on_eol=False,
             prevent_newline_after_colon=False,
             random_seed=-1):
    """Given prompts and input parameters, run inference and return:
       tokens: prompts plus the generated tokens.
       lengths: length of the prompt + generations. Note that we can
           discard tokens in the tokens tensor that are after the
           corresponding length.
       output_log_probs: log probs of the tokens.
    """

    # Make sure input params are available to all ranks: pack them into a
    # single float tensor, broadcast it, then unpack and cast each value
    # back to its original type.
    values = [tokens_to_generate,
              return_output_log_probs,
              top_k_sampling, top_p_sampling, top_p_decay, top_p_bound,
              temperature, add_BOS, use_eod_token_for_early_termination,
              stop_on_double_eol,
              stop_on_eol,
              prevent_newline_after_colon,
              random_seed]
    values_float_tensor = broadcast_float_list(len(values), float_list=values)
    tokens_to_generate = int(values_float_tensor[0].item())
    return_output_log_probs = bool(values_float_tensor[1].item())
    top_k_sampling = int(values_float_tensor[2].item())
    top_p_sampling = values_float_tensor[3].item()
    top_p_decay = values_float_tensor[4].item()
    top_p_bound = values_float_tensor[5].item()
    temperature = values_float_tensor[6].item()
    add_BOS = bool(values_float_tensor[7].item())
    use_eod_token_for_early_termination = bool(values_float_tensor[8].item())
    stop_on_double_eol = bool(values_float_tensor[9].item())
    stop_on_eol = bool(values_float_tensor[10].item())
    prevent_newline_after_colon = bool(values_float_tensor[11].item())
    random_seed = int(values_float_tensor[12].item())

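    # A random_seed of -1 (the default) leaves the RNG state untouched;
    # any other value seeds torch's generator for reproducible sampling.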
    if random_seed != -1:
        torch.random.manual_seed(random_seed)

    # Tokenize prompts and get the batch.
    # Note that these tensors are broadcast to all ranks.
    if torch.distributed.get_rank() == 0:
        assert prompts is not None

    context_tokens_tensor, context_length_tensor = tokenize_prompts(
        prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS)

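    # No tokens to generate: just score the provided prompts.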
    if tokens_to_generate == 0:
        return score_and_return_on_first_stage(
            model, context_tokens_tensor, context_length_tensor)

    # Main inference function.
    # Note that the outputs are only available on the first pipeline stage.
    return generate_tokens_probs_and_return_on_first_stage(
        model, context_tokens_tensor, context_length_tensor,
        return_output_log_probs=return_output_log_probs,
        top_k=top_k_sampling,
        top_p=top_p_sampling,
        top_p_decay=top_p_decay,
        top_p_bound=top_p_bound,
        temperature=temperature,
        use_eod_token_for_early_termination=use_eod_token_for_early_termination,
        stop_on_double_eol=stop_on_double_eol,
        stop_on_eol=stop_on_eol,
        prevent_newline_after_colon=prevent_newline_after_colon)
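
# A minimal sketch of consuming the raw `generate` outputs (illustrative
# only; per the docstring, entries in `tokens` past the corresponding
# `lengths` value can be discarded):
#
#   tokens, lengths, output_log_probs = generate(
#       model, prompts=["Hello"], tokens_to_generate=8,
#       return_output_log_probs=True)
#   first_sequence = tokens[0, :lengths[0]]  # prompt plus generated tokens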

def beam_search_and_post_process(model,
                                 prompts=None,
                                 tokens_to_generate=0,
                                 beam_size=0,
                                 add_BOS=False,
                                 stop_token=50256,
                                 num_return_gen=1,
                                 length_penalty=1,
                                 prevent_newline_after_colon=False):
    """Run beam search and post-process outputs, i.e., detokenize,
    move to cpu and convert to list."""

    # Main inference.
    tokens, scores = beam_search(model,
                                 prompts=prompts,
                                 tokens_to_generate=tokens_to_generate,
                                 beam_size=beam_size,
                                 add_BOS=add_BOS,
                                 stop_token=stop_token,
                                 num_return_gen=num_return_gen,
                                 length_penalty=length_penalty,
                                 prevent_newline_after_colon=prevent_newline_after_colon)
    # Only post-process on first stage.
    if mpu.is_pipeline_first_stage():
        lengths = tokens.size(1) * torch.ones(
            beam_size, dtype=torch.int64, device=torch.cuda.current_device())
        tokens, prompts_plus_generations, prompts_plus_generations_segments = \
            detokenize_generations(tokens, lengths, True)
        scores = scores.cpu().numpy().tolist()
        return prompts_plus_generations, prompts_plus_generations_segments, scores

    return None
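
# A minimal usage sketch (illustrative only; prompt and sizes are made up):
#
#   result = beam_search_and_post_process(model,
#                                         prompts=["The capital of France is"],
#                                         tokens_to_generate=16,
#                                         beam_size=4)
#   if result is not None:  # non-None only on the first pipeline stage
#       prompts_plus_generations, segments, scores = result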

def beam_search(model,
                prompts=None,
                tokens_to_generate=0,
                beam_size=0,
                add_BOS=False,
                stop_token=50256,
                num_return_gen=1,
                length_penalty=1,
                prevent_newline_after_colon=False):
    """Given prompts and input parameters, run beam search and return the
    generated tokens and their scores. As with `generate`, the outputs are
    only available on the first pipeline stage."""
    # Make sure input params are available to all ranks.
    values = [tokens_to_generate,
              beam_size,
              add_BOS,
              stop_token,
              num_return_gen,
              length_penalty,
              prevent_newline_after_colon]
    values_float_tensor = broadcast_float_list(len(values), float_list=values)
    tokens_to_generate = int(values_float_tensor[0].item())
    beam_size = int(values_float_tensor[1].item())
    add_BOS = bool(values_float_tensor[2].item())
    stop_token = int(values_float_tensor[3].item())
    num_return_gen = int(values_float_tensor[4].item())
    length_penalty = values_float_tensor[5].item()
    prevent_newline_after_colon = bool(values_float_tensor[6].item())

    context_tokens_tensor, context_length_tensor = tokenize_prompts(
        prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS)

    return beam_search_and_return_on_first_stage(
        model, context_tokens_tensor, context_length_tensor,
        beam_size, stop_token=stop_token, num_return_gen=num_return_gen,
        length_penalty=length_penalty,
        prevent_newline_after_colon=prevent_newline_after_colon)
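
# A minimal sketch of calling `beam_search` directly (illustrative only;
# it mirrors the unpacking done in beam_search_and_post_process above):
#
#   tokens, scores = beam_search(model, prompts=["Once upon a time"],
#                                tokens_to_generate=16, beam_size=4)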