# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import torch
import json
import threading
from flask import Flask, request, jsonify, current_app
from flask_restful import Resource, Api
from megatron import get_args
from megatron import mpu
from megatron.text_generation_utils import generate

GENERATE_NUM = 0  # value broadcast from rank 0 to signal that a generate step is starting
lock = threading.Lock()  # serialize requests so only one generation runs at a time

class MegatronGenerate(Resource):
    """REST resource that runs text generation on the loaded Megatron model."""

    def __init__(self, model):
        self.model = model

    @staticmethod
    def send_do_generate():
        # Broadcast GENERATE_NUM from rank 0 so the other ranks know a
        # generate call is starting and can join it.
        choice = torch.cuda.LongTensor([GENERATE_NUM])
        torch.distributed.broadcast(choice, 0)
     
    def put(self):
        args = get_args()
        print("request IP: " + str(request.remote_addr))
        print(json.dumps(request.get_json()), flush=True)
        print("current time: ", datetime.datetime.now())
        
        sentences = request.get_json()["sentences"]
        if len(sentences) > 128:
            return "Maximum number of sentences is 128", 400

        tokens_to_generate = 64  # Choosing hopefully sane default.  Full sequence is slow
        if "tokens_to_generate" in request.get_json():
            tokens_to_generate = request.get_json()["tokens_to_generate"]
            if not isinstance(tokens_to_generate, int):
                return "tokens_to_generate must be an integer greater than 0", 400
            if tokens_to_generate < 1:
                return "tokens_to_generate must be an integer greater than 0", 400

        all_probs = False
        if "all_probs" in request.get_json():
            all_probs = request.get_json()["all_probs"]
            if not isinstance(all_probs, bool):
                return "all_probs must be a boolean value"
        
        temperature = args.temperature
        if "temperature" in request.get_json():
            temperature = request.get_json()["temperature"]
            if not isinstance(temperature, float) or not \
               0.0 < temperature <= 100.0:
                return "temperature must be a positive float less than or equal to 100.0"
        
        add_BOS = False
        if "add_BOS" in request.get_json():
            add_BOS = request.get_json()["add_BOS"]
            if not isinstance(add_BOS, bool):
                return "add_BOS must be a boolean value"

        with lock:  # Serialize requests: only one generate call runs at a time across threads
            MegatronGenerate.send_do_generate()  # Tell other ranks we're doing generate
            resp_sentences, resp_sentences_seg, output_logits, full_logits, tokens = generate(self.model, sentences, tokens_to_generate, all_probs, temperature, add_BOS) 
        
        if all_probs:
            return jsonify({"sentences": resp_sentences,
                "segments": resp_sentences_seg,
                "logits": output_logits,
                "all_logits": full_logits,
                "tokens": tokens})
        
        return jsonify({"sentences": resp_sentences,
            "segments": resp_sentences_seg,
            "logits": output_logits})

class MegatronServer(object):
    """Flask server exposing the model through the /generate endpoint."""

    def __init__(self, model):
        self.app = Flask(__name__, static_url_path='')
        api = Api(self.app)
        api.add_resource(MegatronGenerate, '/generate', resource_class_args=[model])
        
    def run(self, url): 
        self.app.run(url, threaded=True, debug=False)
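
# ---------------------------------------------------------------------------
# Illustrative client sketch, not part of the original module: it shows the
# JSON contract of the PUT /generate endpoint implemented above, assuming the
# server was started via MegatronServer(model).run(host).  The URL (Flask's
# default port is 5000 when run() receives only a host), the `requests`
# dependency, and the prompt text are assumptions.
def _example_client_request(url="http://localhost:5000"):
    import requests  # client-side dependency, not required by this server

    payload = {
        "sentences": ["Megatron-LM is"],  # at most 128 prompts per request
        "tokens_to_generate": 32,         # positive integer (default is 64)
        "temperature": 1.0,               # float in (0.0, 100.0]
        "all_probs": False,               # True also returns full logits
        "add_BOS": False,                 # prepend a BOS token to each prompt
    }
    response = requests.put(url + "/generate", json=payload)
    return response.json()["sentences"]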