Commit 1fcf31b8 authored by Bruce MacDonald

move to contained directory

parent 41419f75
README.md (new version):

# Ollama

- Run models, fast
- Download, manage and import models

## Install

```
pip install ollama
```

## Example quickstart

```python
import ollama
model_name = "huggingface.co/thebloke/llama-7b-ggml"
model = ollama.pull(model_name)
ollama.load(model)
ollama.generate(model_name, "hi")
```

## Reference

### `ollama.load`

Load a model from a path or a Docker image.

```python
ollama.load("model name")
```

### `ollama.generate("message")`

Generate a completion.

```python
ollama.generate(model, "hi")
```

### `ollama.models`

List models.

```
models = ollama.models()
```

### `ollama.serve`

Serve the ollama HTTP server.

## Coming Soon

### `ollama.pull`

Examples:

```python
ollama.pull("huggingface.co/thebloke/llama-7b-ggml")
```

### `ollama.import`

Import an existing model into the model store.

```python
ollama.import("./path/to/model")
```

### `ollama.search`

Search for compatible models that Ollama can run.

```python
ollama.search("llama-7b")
```

## Future CLI

```
ollama run huggingface.co/thebloke/llama-7b-ggml
```
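The reference above lists `ollama.serve` without a snippet. A minimal sketch of how it would presumably be called, mirroring the other entries; the zero-argument signature is an assumption, not something documented here:

```python
import ollama

# Start the local HTTP server backing the API
# (zero-argument call assumed; a port option may exist but is not documented above).
ollama.serve()
```

This corresponds to the Flask server started by the `serve` command in the Python module further down.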
README.md (previous version):

# ollama

🙊

## Running

Install dependencies:

```
pip install -r requirements.txt
```

Put your model in `models/` and run:

```
python3 ollama.py serve
```

To run the app:

```
cd desktop
npm install
npm start
```

## Building

If using Apple silicon, you need a Python version that supports arm64:

```bash
wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh
bash Miniforge3-MacOSX-arm64.sh
```

Get the dependencies:

```bash
pip install -r requirements.txt
```

Then build a binary for your current platform:

```bash
python3 build.py
```

### Building the app

```
cd desktop
npm run package
```

## API

### `GET /models`

Returns a list of available models.

### `POST /generate`

Generates completions as a series of JSON objects.

- model: `string` - The name of the model to use in the `models` folder.
- prompt: `string` - The prompt to use.
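`POST /generate` streams completions rather than returning a single JSON body, so a client should read the response incrementally. A minimal client sketch using `requests`; the host, port, model value, and the assumption that each streamed line is a bare JSON object shaped like a llama.cpp completion chunk (as consumed by the CLI below) are illustrative, not documented above:

```python
import json
import requests

# Assumed address: the Flask server binds 0.0.0.0 on a configurable port.
URL = "http://localhost:5000/generate"
payload = {"model": "llama-7b", "prompt": "hi"}  # placeholder model name

with requests.post(URL, json=payload, stream=True) as resp:
    for line in resp.iter_lines():
        if not line:
            continue  # skip keep-alive blank lines
        chunk = json.loads(line)
        # Each chunk mirrors llama.cpp's completion format.
        print(chunk["choices"][0]["text"], end="", flush=True)
```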
ollama.py:

```diff
@@ -2,6 +2,9 @@ import json
 import os
 import threading
 import click
+from transformers import AutoModel
+from tqdm import tqdm
+from pathlib import Path
 from llama_cpp import Llama
 from flask import Flask, Response, stream_with_context, request
 from flask_cors import CORS
```

```diff
@@ -15,33 +18,81 @@ llms = {}
 lock = threading.Lock()


+def models_directory():
+    home_dir = Path.home()
+    models_dir = home_dir / ".ollama/models"
+    if not models_dir.exists():
+        models_dir.mkdir(parents=True)
+    return models_dir
+
+
-def load(model):
+def load(model=None, path=None):
+    """
+    Load a model.
+
+    The model can be specified by providing either the path or the model name,
+    but not both. If both are provided, this function will raise a ValueError.
+    If the model does not exist or could not be loaded, this function returns an error.
+
+    Args:
+        model (str, optional): The name of the model to load.
+        path (str, optional): The path to the model file.
+
+    Returns:
+        dict or None: If the model cannot be loaded, a dictionary with an 'error' key is returned.
+            If the model is successfully loaded, None is returned.
+    """
     with lock:
-        if not os.path.exists(f"{model}"):
-            return {"error": "The model does not exist."}
-        if model not in llms:
-            llms[model] = Llama(model_path=f"{model}")
+        if path is not None and model is not None:
+            raise ValueError(
+                "Both path and model are specified. Please provide only one of them."
+            )
+        elif path is not None:
+            name = os.path.basename(path)
+            load_from = path
+        elif model is not None:
+            name = model
+            dir = models_directory()
+            load_from = str(dir / f"{model}.bin")
+        else:
+            raise ValueError("Either path or model must be specified.")
+        if not os.path.exists(load_from):
+            return {"error": f"The model at {load_from} does not exist."}
+        if name not in llms:
+            # TODO: download model from a repository if it does not exist
+            llms[name] = Llama(model_path=load_from)
+    # TODO: this should start a persistent instance of ollama with the model loaded
     return None


 def unload(model):
-    with lock:
-        if not os.path.exists(f"{model}"):
-            return {"error": "The model does not exist."}
-        llms.pop(model, None)
-        return None
+    """
+    Unload a model.
+
+    Remove a model from the list of loaded models. If the model is not loaded, this is a no-op.
+
+    Args:
+        model (str): The name of the model to unload.
+    """
+    llms.pop(model, None)


-def query(model, prompt):
+def generate(model, prompt):
     # auto load
     error = load(model)
+    print(error)
     if error is not None:
         return error
     generated = llms[model](
         str(prompt),  # TODO: optimize prompt based on model
         max_tokens=4096,
         stop=["Q:", "\n"],
-        echo=True,
         stream=True,
     )
     for output in generated:
```
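In the new `load()`, a bare model name is resolved to a `.bin` file under `~/.ollama/models`, which `models_directory()` creates on demand. A standalone sketch of that naming convention; the helper name here is illustrative:

```python
from pathlib import Path


def resolve_model_path(model: str) -> Path:
    """Map a model name to its expected file, e.g. "llama-7b" -> ~/.ollama/models/llama-7b.bin."""
    models_dir = Path.home() / ".ollama/models"
    models_dir.mkdir(parents=True, exist_ok=True)  # same layout models_directory() creates
    return models_dir / f"{model}.bin"


print(resolve_model_path("llama-7b"))
```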
```diff
@@ -49,7 +100,8 @@ def query(model, prompt):
 def models():
-    all_files = os.listdir("./models")
+    dir = models_directory()
+    all_files = os.listdir(dir)
     bin_files = [
         file.replace(".bin", "") for file in all_files if file.endswith(".bin")
     ]
```
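This `models()` listing backs the `GET /models` route from the previous README's API section. A minimal client-side check, assuming the server is running locally on port 5000 and returns the list as JSON (both assumptions):

```python
import requests

# Assumed address; the server lists the .bin files found under ~/.ollama/models.
resp = requests.get("http://localhost:5000/models")
print(resp.json())
```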
```diff
@@ -74,9 +126,7 @@ def unload_route_handler():
     model = data.get("model")
     if not model:
         return Response("Model is required", status=400)
-    error = unload(model)
-    if error is not None:
-        return error
+    unload(model)
     return Response(status=204)
```

```diff
@@ -92,7 +142,7 @@ def generate_route_handler():
     if not os.path.exists(f"{model}"):
         return {"error": "The model does not exist."}, 400
     return Response(
-        stream_with_context(query(model, prompt)), mimetype="text/event-stream"
+        stream_with_context(generate(model, prompt)), mimetype="text/event-stream"
     )
```
```diff
@@ -118,20 +168,56 @@ def serve(port, debug):
     app.run(host="0.0.0.0", port=port, debug=debug)


-@cli.command()
-@click.option("--model", default="vicuna-7b-v1.3.ggmlv3.q8_0", help="The model to use")
+@cli.command(name="load")
+@click.argument("model")
+@click.option("--file", default=False, help="Indicates that a file path is provided")
+def load_cli(model, file):
+    if file:
+        error = load(path=model)
+    else:
+        error = load(model)
+    if error is not None:
+        print(error)
+        return
+    print("Model loaded")
+
+
+@cli.command(name="generate")
+@click.argument("model")
 @click.option("--prompt", default="", help="The prompt for the model")
-def generate(model, prompt):
+def generate_cli(model, prompt):
     if prompt == "":
         prompt = input("Prompt: ")
     output = ""
     prompt = template(model, prompt)
-    for generated in query(model, prompt):
+    for generated in generate(model, prompt):
         generated_json = json.loads(generated)
         text = generated_json["choices"][0]["text"]
         output += text
         print(f"\r{output}", end="", flush=True)


+def download_model(model_name):
+    dir = models_directory()
+    AutoModel.from_pretrained(model_name, cache_dir=dir)
+
+
+@cli.command(name="models")
+def models_cli():
+    print(models())
+
+
+@cli.command(name="pull")
+@click.argument("model")
+def pull_cli(model):
+    print("not implemented")
+
+
+@cli.command(name="import")
+@click.argument("model")
+def import_cli(model):
+    print("not implemented")
+
+
 if __name__ == "__main__":
     cli()
```
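The click commands registered above (`load`, `generate`, `models`, `pull`, `import`, plus `serve`) can also be exercised programmatically with click's test runner. A sketch, assuming the module is importable as `ollama` (the import path is an assumption):

```python
from click.testing import CliRunner

from ollama import cli  # import path is an assumption

runner = CliRunner()
result = runner.invoke(cli, ["models"])  # equivalent to `python3 ollama.py models`
print(result.output)
```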