Commit 1fcf31b8 authored by Bruce MacDonald

move to contained directory

parent 41419f75
# Ollama
🙊
- Run models, fast
- Download, manage and import models
## Install

```
pip install ollama
```
## Example quickstart

```python
import ollama

model_name = "huggingface.co/thebloke/llama-7b-ggml"
model = ollama.pull(model_name)
ollama.load(model)
ollama.generate(model_name, "hi")
```
## Running from source

Install dependencies:

```
pip install -r requirements.txt
```

Put your model in `models/` and run:

```
python3 ollama.py serve
```

To run the desktop app:

```
cd desktop
npm install
npm start
```

## Reference
### `ollama.load`

Load a model from a path or a Docker image

```python
ollama.load("model name")
```
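The server-side `load()` helper in this commit also accepts an explicit file path. If the package-level API mirrors it, a path-based call might look like the sketch below; the `path` keyword and the file name are assumptions, not confirmed parts of the package API.

```python
import ollama

# Assumption: the package-level load() accepts the same `path` keyword
# as the server-side load(model=None, path=None) helper in this commit.
ollama.load(path="./models/llama-7b.bin")
```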
### `ollama.generate("message")`

Generate a completion

```python
ollama.generate(model, "hi")
```
### `ollama.models`

List models

```python
models = ollama.models()
```
### `ollama.serve`

Serve the Ollama HTTP server
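As a minimal sketch only: assuming `ollama.serve` takes no required arguments and simply runs the HTTP server (the CLI command in the server code accepts `--port` and `--debug` options), starting it from Python might look like this.

```python
import ollama

# Assumption: serve() blocks and runs the HTTP server with default settings.
ollama.serve()
```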
## Coming Soon
### `ollama.pull`
Examples:
```python
ollama.pull("huggingface.co/thebloke/llama-7b-ggml")
```
### `ollama.import`

Import an existing model into the model store

```python
ollama.import("./path/to/model")
```
### `ollama.search`

Search for compatible models that Ollama can run

```python
ollama.search("llama-7b")
```
## Future CLI

```
ollama run huggingface.co/thebloke/llama-7b-ggml
```

## Building

If using Apple silicon, you need a Python version that supports arm64:

```bash
wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh
bash Miniforge3-MacOSX-arm64.sh
```

Get the dependencies:

```bash
pip install -r requirements.txt
```

Then build a binary for your current platform:

```bash
python3 build.py
```

### Building the app

```
cd desktop
npm run package
```

## API

### `GET /models`

Returns a list of available models

### `POST /generate`

Generates completions as a series of JSON objects

- `model`: `string` - The name of the model to use in the `models` folder.
- `prompt`: `string` - The prompt to use.
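For illustration, a minimal sketch of calling these endpoints with `requests`. The port and the exact shape of the streamed objects are assumptions based on the server and CLI code in this commit, where each streamed chunk is parsed as `{"choices": [{"text": ...}]}`.

```python
import json

import requests

BASE = "http://localhost:5000"  # assumption: the server was started with --port 5000

# List available models
print(requests.get(f"{BASE}/models").json())

# Stream a completion; each non-empty line is assumed to be one JSON object
with requests.post(
    f"{BASE}/generate",
    json={"model": "llama-7b", "prompt": "hi"},
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if line:
            chunk = json.loads(line)
            print(chunk["choices"][0]["text"], end="", flush=True)
```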
@@ -2,6 +2,9 @@ import json
import os
import threading
import click
from transformers import AutoModel
from tqdm import tqdm
from pathlib import Path
from llama_cpp import Llama
from flask import Flask, Response, stream_with_context, request
from flask_cors import CORS
@@ -15,33 +18,81 @@ llms = {}
lock = threading.Lock()


def models_directory():
    home_dir = Path.home()
    models_dir = home_dir / ".ollama/models"

    if not models_dir.exists():
        models_dir.mkdir(parents=True)

    return models_dir


def load(model=None, path=None):
    """
    Load a model.

    The model can be specified by providing either the path or the model name,
    but not both. If both are provided, this function will raise a ValueError.
    If the model does not exist or could not be loaded, this function returns an error.

    Args:
        model (str, optional): The name of the model to load.
        path (str, optional): The path to the model file.

    Returns:
        dict or None: If the model cannot be loaded, a dictionary with an 'error' key is returned.
        If the model is successfully loaded, None is returned.
    """
    with lock:
        if path is not None and model is not None:
            raise ValueError(
                "Both path and model are specified. Please provide only one of them."
            )
        elif path is not None:
            name = os.path.basename(path)
            load_from = path
        elif model is not None:
            name = model
            dir = models_directory()
            load_from = str(dir / f"{model}.bin")
        else:
            raise ValueError("Either path or model must be specified.")

        if not os.path.exists(load_from):
            return {"error": f"The model at {load_from} does not exist."}

        if name not in llms:
            # TODO: download model from a repository if it does not exist
            llms[name] = Llama(model_path=load_from)

    # TODO: this should start a persistent instance of ollama with the model loaded
    return None


def unload(model):
    """
    Unload a model.

    Remove a model from the list of loaded models. If the model is not loaded, this is a no-op.

    Args:
        model (str): The name of the model to unload.
    """
    llms.pop(model, None)


def generate(model, prompt):
    # auto load
    error = load(model)
    print(error)
    if error is not None:
        return error

    generated = llms[model](
        str(prompt),  # TODO: optimize prompt based on model
        max_tokens=4096,
        stop=["Q:", "\n"],
        echo=True,
        stream=True,
    )
    for output in generated:
@@ -49,7 +100,8 @@ def query(model, prompt):
def models():
    dir = models_directory()
    all_files = os.listdir(dir)
    bin_files = [
        file.replace(".bin", "") for file in all_files if file.endswith(".bin")
    ]
@@ -74,9 +126,7 @@ def unload_route_handler():
    model = data.get("model")
    if not model:
        return Response("Model is required", status=400)

    unload(model)
    return Response(status=204)
@@ -92,7 +142,7 @@ def generate_route_handler():
    if not os.path.exists(f"{model}"):
        return {"error": "The model does not exist."}, 400

    return Response(
        stream_with_context(generate(model, prompt)), mimetype="text/event-stream"
    )
@@ -118,20 +168,56 @@ def serve(port, debug):
    app.run(host="0.0.0.0", port=port, debug=debug)


@cli.command(name="load")
@click.argument("model")
@click.option("--file", default=False, help="Indicates that a file path is provided")
def load_cli(model, file):
    if file:
        error = load(path=model)
    else:
        error = load(model)
    if error is not None:
        print(error)
        return
    print("Model loaded")


@cli.command(name="generate")
@click.argument("model")
@click.option("--prompt", default="", help="The prompt for the model")
def generate_cli(model, prompt):
    if prompt == "":
        prompt = input("Prompt: ")
    output = ""
    prompt = template(model, prompt)
    for generated in generate(model, prompt):
        generated_json = json.loads(generated)
        text = generated_json["choices"][0]["text"]
        output += text
        print(f"\r{output}", end="", flush=True)


def download_model(model_name):
    dir = models_directory()
    AutoModel.from_pretrained(model_name, cache_dir=dir)


@cli.command(name="models")
def models_cli():
    print(models())


@cli.command(name="pull")
@click.argument("model")
def pull_cli(model):
    print("not implemented")


@cli.command(name="import")
@click.argument("model")
def import_cli(model):
    print("not implemented")


if __name__ == "__main__":
    cli()
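As a usage note, a minimal sketch of driving these functions from Python rather than the CLI. The import path and model name are assumptions; the stream parsing mirrors what `generate_cli` does above.

```python
import json

# Assumptions: this module is importable as `ollama`, and a file named
# llama-7b.bin exists in ~/.ollama/models.
from ollama import generate, load

err = load("llama-7b")
if err is not None:
    raise SystemExit(err)

# generate() yields JSON strings shaped like llama-cpp-python streaming output
for chunk in generate("llama-7b", "hi"):
    piece = json.loads(chunk)
    print(piece["choices"][0]["text"], end="", flush=True)
```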