import json
import os
import threading

import click
from flask import Flask, Response, stream_with_context, request
from flask_cors import CORS
from llama_cpp import Llama

# Flask application exposing the model HTTP API.
app = Flask(__name__)
CORS(app)  # enable CORS for all routes

# llms tracks which models are loaded
# Maps model name -> Llama instance; guarded by the module-level lock.
llms = {}
# Serializes access to the shared llms cache across Flask request threads.
lock = threading.Lock()


def load(model):
    """Ensure *model* is present in the llms cache.

    Returns None on success, or an error dict when the model's .bin file
    is missing from ./models.
    """
    model_path = f"./models/{model}.bin"
    with lock:
        if not os.path.exists(model_path):
            return {"error": "The model does not exist."}
        if model not in llms:
            # First use: instantiate and cache the model.
            llms[model] = Llama(model_path=model_path)
    return None


def unload(model):
    """Drop *model* from the llms cache (no-op if it was not loaded).

    Returns None on success, or an error dict when the model's .bin file
    is missing from ./models.
    """
    model_path = f"./models/{model}.bin"
    with lock:
        if not os.path.exists(model_path):
            return {"error": "The model does not exist."}
        # pop with a default so unloading a never-loaded model is harmless.
        llms.pop(model, None)
    return None


def query(model, prompt):
    """Stream completions for *prompt* from *model* as JSON strings.

    Auto-loads the model on first use. Yields one JSON-encoded chunk per
    streamed completion. On load failure, yields a single JSON-encoded
    error object and stops.
    """
    # auto load
    error = load(model)
    if error is not None:
        # BUG FIX: this function is a generator, so the original
        # ``return error`` only raised StopIteration(error) — callers got
        # an empty stream and the error dict was silently discarded.
        # Surface the error in the stream instead.
        yield json.dumps(error)
        return
    generated = llms[model](
        str(prompt),  # TODO: optimize prompt based on model
        max_tokens=4096,
        stop=["Q:", "\n"],
        echo=True,
        stream=True,
    )
    for output in generated:
        yield json.dumps(output)


def models():
    """Return the names of available models: every ``*.bin`` in ./models,
    with the extension stripped."""
    names = []
    for entry in os.listdir("./models"):
        if entry.endswith(".bin"):
            names.append(entry.replace(".bin", ""))
    return names
@app.route("/load", methods=["POST"])
def load_route_handler():
    """POST /load — load a model into memory. JSON body: {"model": name}.

    Responds 204 on success, 400 with an error body on failure.
    """
    data = request.get_json()
    model = data.get("model")
    if not model:
        return Response("Model is required", status=400)
    error = load(model)
    if error is not None:
        # BUG FIX: returning the bare dict produced HTTP 200 for an error;
        # return 400 to match /generate's error handling.
        return error, 400
    return Response(status=204)


@app.route("/unload", methods=["POST"])
def unload_route_handler():
    """POST /unload — drop a model from memory. JSON body: {"model": name}.

    Responds 204 on success, 400 with an error body on failure.
    """
    data = request.get_json()
    model = data.get("model")
    if not model:
        return Response("Model is required", status=400)
    error = unload(model)
    if error is not None:
        # BUG FIX: returning the bare dict produced HTTP 200 for an error;
        # return 400 to match /generate's error handling.
        return error, 400
    return Response(status=204)


@app.route("/generate", methods=["POST"])
def generate_route_handler():
    """POST /generate — stream completions for a prompt.

    JSON body: {"model": name, "prompt": text}. Streams JSON chunks as a
    text/event-stream response; 400 when a field is missing or the model
    file does not exist.
    """
    payload = request.get_json()
    model = payload.get("model")
    prompt = payload.get("prompt")
    # Validate in order: model first, then prompt (message text preserved).
    for value, label in ((model, "Model"), (prompt, "Prompt")):
        if not value:
            return Response(f"{label} is required", status=400)
    if not os.path.exists(f"./models/{model}.bin"):
        return {"error": "The model does not exist."}, 400
    stream = stream_with_context(query(model, prompt))
    return Response(stream, mimetype="text/event-stream")

@app.route("/models", methods=["GET"])
def models_route_handler():
    """GET /models — list available model names as a JSON array."""
    available = models()
    body = json.dumps(available)
    return Response(body, mimetype="application/json")

@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
    """Top-level command group for the proto CLI."""
    if ctx.invoked_subcommand is not None:
        return
    # Invoked bare (no subcommand): print usage instead of doing nothing.
    click.echo(ctx.get_help())


@cli.command()
@click.option("--port", default=5000, help="Port to run the server on")
@click.option("--debug", default=False, help="Enable debug mode")
def serve(port, debug):
    """Run the HTTP API server on the given port."""
    # BUG FIX: the original string was missing the f-prefix and printed
    # the literal text "{port}" instead of the port number.
    print(f"Serving on http://localhost:{port}")
    app.run(host="0.0.0.0", port=port, debug=debug)


@cli.command()
@click.option("--model", default="vicuna-7b-v1.3.ggmlv3.q8_0", help="The model to use")
@click.option("--prompt", default="", help="The prompt for the model")
def generate(model, prompt):
    """Generate a completion on the terminal, redrawing as tokens stream in."""
    if prompt == "":
        prompt = input("Prompt: ")
    output = ""
    for generated in query(model, prompt):
        generated_json = json.loads(generated)
        if "error" in generated_json:
            # Surface load failures instead of exiting silently.
            print(generated_json["error"])
            return
        text = generated_json["choices"][0]["text"]
        output += text
        # \r rewrites the current line with the accumulated output.
        print(f"\r{output}", end="", flush=True)
    # BUG FIX: emit a final newline so the shell prompt does not overwrite
    # the carriage-return-rewritten output line.
    print()


if __name__ == "__main__":
    # Script entry point: dispatch to the click command group.
    cli()