# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Inference API."""


import torch

from megatron import mpu
from .communication import broadcast_float_list
from .generation import (
    generate_tokens_probs_and_return_on_first_stage,
    score_and_return_on_first_stage,
    beam_search_and_return_on_first_stage)
from .tokenization import (
    tokenize_prompts,
    detokenize_generations)

def generate_and_post_process(model,
                              prompts=None,
                              tokens_to_generate=0,
                              return_output_log_probs=False,
                              top_k_sampling=0,
                              top_p_sampling=0.0,
                              temperature=1.0,
                              add_BOS=False,
                              use_eod_token_for_early_termination=True,
                              stop_on_double_eol=False,
                              stop_on_eol=False,
                              random_seed=-1):
    """Run inference, then post-process the outputs: detokenize,
    move to cpu, and convert to python lists.

    On the first pipeline stage returns a 4-tuple of
    (prompts plus generations, their per-token segments,
    per-token log probs or None, raw token ids); all other
    ranks return None.
    """
    # Run the main generation step. Outputs only exist on the first stage.
    tokens, lengths, output_log_probs = generate(
        model,
        prompts=prompts,
        tokens_to_generate=tokens_to_generate,
        return_output_log_probs=return_output_log_probs,
        top_k_sampling=top_k_sampling,
        top_p_sampling=top_p_sampling,
        temperature=temperature,
        add_BOS=add_BOS,
        use_eod_token_for_early_termination=use_eod_token_for_early_termination,
        stop_on_double_eol=stop_on_double_eol,
        stop_on_eol=stop_on_eol,
        random_seed=random_seed)

    # Nothing to post-process on ranks other than the first pipeline stage.
    if not mpu.is_pipeline_first_stage():
        return None

    tokens, prompts_plus_generations, prompts_plus_generations_segments = \
        detokenize_generations(tokens, lengths, True)

    if return_output_log_probs:
        output_log_probs = output_log_probs.cpu().numpy().tolist()
        # Truncate each prob list to one fewer entry than its segment list.
        for idx, (probs, segments) in enumerate(
                zip(output_log_probs, prompts_plus_generations_segments)):
            output_log_probs[idx] = probs[:len(segments) - 1]

    return prompts_plus_generations, prompts_plus_generations_segments, \
        output_log_probs, tokens
def generate(model,
             prompts=None,
             tokens_to_generate=0,
             return_output_log_probs=False,
             top_k_sampling=0,
             top_p_sampling=0.0,
             temperature=1.0,
             add_BOS=False,
             use_eod_token_for_early_termination=True,
             stop_on_double_eol=False,
             stop_on_eol=False,
             random_seed=-1):
    """Given prompts and input parameters, run inference and return:
       tokens: prompts plus the generated tokens.
       lengths: length of the prompt + generations. Note that we can
           discard tokens in the tokens tensor that are after the
           corresponding length.
       output_log_probs: log probs of the tokens.
    """
    # Make sure input params are available to all ranks by broadcasting
    # them as a flat float list.
    params = [tokens_to_generate,
              return_output_log_probs,
              top_k_sampling,
              top_p_sampling,
              temperature,
              add_BOS,
              use_eod_token_for_early_termination,
              stop_on_double_eol,
              stop_on_eol,
              random_seed]
    params_tensor = broadcast_float_list(10, float_list=params)
    # Everything was shipped as floats; cast each entry back to its type.
    tokens_to_generate = int(params_tensor[0].item())
    return_output_log_probs = bool(params_tensor[1].item())
    top_k_sampling = int(params_tensor[2].item())
    top_p_sampling = params_tensor[3].item()
    temperature = params_tensor[4].item()
    add_BOS = bool(params_tensor[5].item())
    use_eod_token_for_early_termination = bool(params_tensor[6].item())
    stop_on_double_eol = bool(params_tensor[7].item())
    stop_on_eol = bool(params_tensor[8].item())
    random_seed = int(params_tensor[9].item())

    # A seed of -1 means "leave the RNG state alone".
    if random_seed != -1:
        torch.random.manual_seed(random_seed)

    # Tokenize prompts and get the batch.
    # Note that these tensors are broadcasted to all ranks, so only
    # rank 0 must actually supply prompts.
    if torch.distributed.get_rank() == 0:
        assert prompts is not None

    context_tokens_tensor, context_length_tensor = tokenize_prompts(
        prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS)

    # With nothing to generate, just score the provided prompts.
    if tokens_to_generate == 0:
        return score_and_return_on_first_stage(
            model, context_tokens_tensor, context_length_tensor)

    # Main inference function.
    # Note that the outputs are available on the first stage.
    return generate_tokens_probs_and_return_on_first_stage(
        model, context_tokens_tensor, context_length_tensor,
        return_output_log_probs=return_output_log_probs,
        top_k=top_k_sampling,
        top_p=top_p_sampling,
        temperature=temperature,
        use_eod_token_for_early_termination=use_eod_token_for_early_termination,
        stop_on_double_eol=stop_on_double_eol,
        stop_on_eol=stop_on_eol)
def beam_search_and_post_process(model,
                                 prompts=None,
                                 tokens_to_generate=0,
                                 beam_size=0,
                                 add_BOS=False):
    """Run beam search, then post-process the outputs: detokenize,
    move to cpu, and convert to python lists.

    On the first pipeline stage returns (prompts plus generations,
    their per-token segments, raw token ids); all other ranks
    return None.
    """
    # Run the main beam-search step.
    tokens, scores = beam_search(model,
                                 prompts=prompts,
                                 tokens_to_generate=tokens_to_generate,
                                 beam_size=beam_size,
                                 add_BOS=add_BOS)

    # Nothing to post-process on ranks other than the first pipeline stage.
    if not mpu.is_pipeline_first_stage():
        return None

    # Every beam is detokenized with the same length: the full width of
    # the tokens tensor.
    lengths = tokens.size(1) * torch.ones(
        beam_size, dtype=torch.int64, device=torch.cuda.current_device())
    tokens, prompts_plus_generations, prompts_plus_generations_segments = \
        detokenize_generations(tokens, lengths, True)
    return prompts_plus_generations, prompts_plus_generations_segments, tokens

def beam_search(model, prompts=None, tokens_to_generate=0, beam_size=0,
                add_BOS=False):
    """Broadcast beam-search parameters to all ranks, tokenize the
    prompts, and run beam search.

    Returns the result of beam_search_and_return_on_first_stage
    (available on the first pipeline stage).
    """
    # Make sure input params are available to all ranks; they travel as
    # a flat float list and are cast back to their types afterwards.
    values = [tokens_to_generate,
              beam_size,
              add_BOS]
    values_float_tensor = broadcast_float_list(3, float_list=values)
    tokens_to_generate = int(values_float_tensor[0].item())
    beam_size = int(values_float_tensor[1].item())
    add_BOS = bool(values_float_tensor[2].item())

    # Tokenized prompts are broadcast to all ranks by tokenize_prompts.
    context_tokens_tensor, context_length_tensor = tokenize_prompts(
        prompts=prompts, tokens_to_generate=tokens_to_generate, add_BOS=add_BOS)

    return beam_search_and_return_on_first_stage(
        model, context_tokens_tensor, context_length_tensor, beam_size)