# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import torch
import json
from flask import Flask, request, jsonify, current_app
from flask_restful import Resource, Api
from megatron import get_args
from megatron import mpu
from megatron.text_generation_utils import generate

GENERATE_NUM = 0

class MegatronGenerate(Resource):
    """REST resource that serves text generation from a Megatron model.

    Rank 0 handles the HTTP request; ``send_do_generate`` broadcasts a
    signal so the other ranks enter ``generate()`` in lockstep.
    """

    def __init__(self, model):
        # Model handle passed through flask_restful's resource_class_args.
        self.model = model

    @staticmethod
    def send_do_generate():
        """Broadcast GENERATE_NUM from rank 0 so other ranks start generating."""
        choice = torch.cuda.LongTensor([GENERATE_NUM])
        torch.distributed.broadcast(choice, 0)

    def put(self):
        """Handle a generation request.

        Expects a JSON body with:
          * "sentences": list of prompt strings (max 128)
          * "tokens_to_generate" (optional): positive int, default 64
          * "all_probs" (optional): bool; when true, also return full logits
            and tokens.

        Returns a JSON payload with the generated sentences (and segments /
        logits), or an error string with HTTP 400 on invalid input.
        """
        req = request.get_json()  # parse once instead of re-parsing per field

        print("request IP: " + str(request.remote_addr))
        print(json.dumps(req), flush=True)
        print("current time: ", datetime.datetime.now())

        sentences = req["sentences"]
        if len(sentences) > 128:
            return "Maximum number of sentences is 128", 400

        tokens_to_generate = 64  # Choosing hopefully sane default.  Full sequence is slow
        if "tokens_to_generate" in req:
            tokens_to_generate = req["tokens_to_generate"]
            # bool is a subclass of int, so it must be rejected explicitly.
            # BUGFIX: these validation failures previously returned the error
            # string with an implicit 200 status; now 400 like the check above.
            if not isinstance(tokens_to_generate, int) or isinstance(tokens_to_generate, bool):
                return "tokens_to_generate must be an integer greater than 0", 400
            if tokens_to_generate < 1:
                return "tokens_to_generate must be an integer greater than 0", 400

        all_probs = False
        if "all_probs" in req:
            all_probs = req["all_probs"]
            if not isinstance(all_probs, bool):
                return "all_probs must be a boolean value", 400

        MegatronGenerate.send_do_generate()  # Tell other ranks we're doing generate
        resp_sentences, resp_sentences_seg, output_logits, full_logits, tokens = generate(
            self.model, sentences, tokens_to_generate, all_probs)

        if all_probs:
            return jsonify({"sentences": resp_sentences,
                "segments": resp_sentences_seg,
                "logits": output_logits,
                "all_logits": full_logits,
                "tokens": tokens})

        return jsonify({"sentences": resp_sentences,
            "segments": resp_sentences_seg,
            "logits": output_logits})
class MegatronServer(object):
    """Thin Flask wrapper that exposes a Megatron model at /generate."""

    def __init__(self, model):
        # Disable static-file caching so the demo UI always reloads fresh assets.
        flask_app = Flask(__name__, static_folder='static', static_url_path='')
        flask_app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
        # Wire the generation resource; the model is handed to each
        # MegatronGenerate instance via resource_class_args.
        Api(flask_app).add_resource(MegatronGenerate, '/generate',
                                    resource_class_args=[model])
        self.app = flask_app

    def run(self, url):
        """Start the (development) server listening on *url* (host address)."""
        self.app.run(url, threaded=True, debug=False)