proto.py 3.65 KB
Newer Older
Bruce MacDonald's avatar
Bruce MacDonald committed
1
2
import json
import os
Bruce MacDonald's avatar
Bruce MacDonald committed
3
import threading
Bruce MacDonald's avatar
add cli  
Bruce MacDonald committed
4
import click
Bruce MacDonald's avatar
Bruce MacDonald committed
5
6
from llama_cpp import Llama
from flask import Flask, Response, stream_with_context, request
Bruce MacDonald's avatar
Bruce MacDonald committed
7
from flask_cors import CORS
8
from template import template
Bruce MacDonald's avatar
Bruce MacDonald committed
9
10
11
12
13
14

# Flask application exposing the model load/unload/generate/list API.
app = Flask(__name__)
CORS(app)  # enable CORS for all routes

# llms tracks which models are loaded (model name -> Llama instance)
llms = {}
Bruce MacDonald's avatar
Bruce MacDonald committed
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# Serializes access to the shared `llms` cache across request threads.
lock = threading.Lock()


def load(model):
    """Ensure *model* is resident in the ``llms`` cache, loading it on first use.

    Returns an error dict when the model file is missing, otherwise ``None``.
    Thread-safe: the check-and-insert happens under the module lock.
    """
    model_path = f"./models/{model}.bin"
    with lock:
        if not os.path.exists(model_path):
            return {"error": "The model does not exist."}
        if model not in llms:
            llms[model] = Llama(model_path=model_path)
    return None


def unload(model):
    """Evict *model* from the ``llms`` cache.

    Returns an error dict when the model file is missing, otherwise ``None``.
    Unloading a model that was never loaded is a no-op.
    """
    with lock:
        if os.path.exists(f"./models/{model}.bin"):
            llms.pop(model, None)
            return None
    return {"error": "The model does not exist."}


Bruce MacDonald's avatar
Bruce MacDonald committed
35
def query(model, prompt):
    """Stream completions for *prompt* from *model* as JSON-encoded strings.

    Loads the model on demand. If loading fails, the generator ends
    immediately (the error dict becomes the StopIteration value, so
    iterating callers simply see an empty stream).
    """
    # auto load
    load_error = load(model)
    if load_error is not None:
        return load_error
    stream = llms[model](
        str(prompt),  # TODO: optimize prompt based on model
        max_tokens=4096,
        stop=["Q:", "\n"],
        echo=True,
        stream=True,
    )
    for chunk in stream:
        yield json.dumps(chunk)


def models():
    """Return the names of available models: the ``.bin`` files under ./models.

    Fixes two defects in the original:
    - listing a non-existent ./models directory raised ``FileNotFoundError``;
      an empty list is returned instead.
    - ``file.replace(".bin", "")`` stripped every occurrence of ``.bin`` in a
      filename, not just the extension; ``os.path.splitext`` removes only the
      trailing suffix.
    """
    models_dir = "./models"
    if not os.path.isdir(models_dir):
        # No models directory yet -> no models, rather than an OSError.
        return []
    return [
        os.path.splitext(file)[0]
        for file in os.listdir(models_dir)
        if file.endswith(".bin")
    ]
Bruce MacDonald's avatar
Bruce MacDonald committed
57
58


59
@app.route("/load", methods=["POST"])
def load_route_handler():
    """POST /load: ensure the requested model is loaded into memory.

    Body: JSON ``{"model": <name>}``. Returns 204 on success, 400 when the
    body is missing/invalid or the model file does not exist.
    """
    # silent=True: a missing or malformed JSON body yields None instead of
    # raising, so the client gets a clean 400 rather than a generic 500.
    data = request.get_json(silent=True) or {}
    model = data.get("model")
    if not model:
        return Response("Model is required", status=400)
    error = load(model)
    if error is not None:
        # Previously the error dict was returned with an implicit 200;
        # use 400 for consistency with the /generate handler.
        return error, 400
    return Response(status=204)


@app.route("/unload", methods=["POST"])
def unload_route_handler():
    """POST /unload: evict the requested model from memory.

    Body: JSON ``{"model": <name>}``. Returns 204 on success, 400 when the
    body is missing/invalid or the model file does not exist.
    """
    # silent=True: a missing or malformed JSON body yields None instead of
    # raising, so the client gets a clean 400 rather than a generic 500.
    data = request.get_json(silent=True) or {}
    model = data.get("model")
    if not model:
        return Response("Model is required", status=400)
    error = unload(model)
    if error is not None:
        # Previously the error dict was returned with an implicit 200;
        # use 400 for consistency with the /generate handler.
        return error, 400
    return Response(status=204)


Bruce MacDonald's avatar
Bruce MacDonald committed
83
@app.route("/generate", methods=["POST"])
def generate_route_handler():
    """POST /generate: stream completions for a prompt as server-sent events.

    Body: JSON ``{"model": <name>, "prompt": <text>}``. Each event payload
    is a JSON-encoded chunk produced by ``query()``. Returns 400 for a
    missing/invalid body, missing fields, or an unknown model.
    """
    # silent=True: a missing or malformed JSON body yields None instead of
    # raising, so the client gets a clean 400 rather than a generic 500.
    data = request.get_json(silent=True) or {}
    model = data.get("model")
    prompt = data.get("prompt")
    if not model:
        return Response("Model is required", status=400)
    if not prompt:
        return Response("Prompt is required", status=400)
    # Check up front so the client gets a clean 400 instead of an empty
    # stream (query() swallows the load error inside the generator).
    if not os.path.exists(f"./models/{model}.bin"):
        return {"error": "The model does not exist."}, 400
    return Response(
        stream_with_context(query(model, prompt)), mimetype="text/event-stream"
    )

Bruce MacDonald's avatar
Bruce MacDonald committed
98

Jeffrey Morgan's avatar
Jeffrey Morgan committed
99
@app.route("/models", methods=["GET"])
def models_route_handler():
    """GET /models: list the model names available on disk as a JSON array."""
    return Response(json.dumps(models()), mimetype="application/json")

Bruce MacDonald's avatar
Bruce MacDonald committed
104

Bruce MacDonald's avatar
add cli  
Bruce MacDonald committed
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
    """Top-level command group; prints help when run with no subcommand."""
    if ctx.invoked_subcommand is not None:
        return
    click.echo(ctx.get_help())


@cli.command()
@click.option("--port", default=5000, help="Port to run the server on")
@click.option("--debug", default=False, help="Enable debug mode")
def serve(port, debug):
    """Run the HTTP API server on the given port."""
    # Bug fix: the original string was missing the f-prefix, so it printed
    # the literal text "{port}" instead of the port number.
    print(f"Serving on http://localhost:{port}")
    app.run(host="0.0.0.0", port=port, debug=debug)


Bruce MacDonald's avatar
Bruce MacDonald committed
121
122
123
124
125
126
127
@cli.command()
@click.option("--model", default="vicuna-7b-v1.3.ggmlv3.q8_0", help="The model to use")
@click.option("--prompt", default="", help="The prompt for the model")
def generate(model, prompt):
    """Stream a completion for --prompt to the terminal, redrawing in place."""
    if prompt == "":
        prompt = input("Prompt: ")
    prompt = template(model, prompt)
    accumulated = ""
    for chunk in query(model, prompt):
        token = json.loads(chunk)["choices"][0]["text"]
        accumulated += token
        # \r rewrites the whole line so the output appears to grow in place.
        print(f"\r{accumulated}", end="", flush=True)


Bruce MacDonald's avatar
Bruce MacDonald committed
136
if __name__ == "__main__":
    # Dispatch to the click CLI when executed as a script.
    cli()