Commit 01d2667f authored by Bruce MacDonald's avatar Bruce MacDonald
Browse files

add function

parent 74549007
......@@ -50,22 +50,22 @@ models = ollama.models()
Serve the ollama http server
## Coming Soon
### `ollama.add(filepath)`
### `ollama.pull(model)`
Download a model
Add a model by importing from a file
```python
ollama.pull("huggingface.co/thebloke/llama-7b-ggml")
ollama.add("./path/to/model")
```
### `ollama.import(filename)`
## Coming Soon
Import a model from a file
### `ollama.pull(model)`
Download a model
```python
ollama.import("./path/to/model")
ollama.pull("huggingface.co/thebloke/llama-7b-ggml")
```
### `ollama.search("query")`
......
......@@ -23,8 +23,8 @@ def main():
generate_parser.set_defaults(fn=generate)
add_parser = subparsers.add_parser("add")
add_parser.add_argument("model_file")
generate_parser.set_defaults(fn=add)
add_parser.add_argument("file")
add_parser.set_defaults(fn=add)
args = parser.parse_args()
args = vars(args)
......@@ -48,4 +48,4 @@ def generate(*args, **kwargs):
def add(*args, **kwargs):
model.add(*args, **kwargs)
engine.add(*args, **kwargs)
import os
import json
import sys
import shutil
from contextlib import contextmanager
from llama_cpp import Llama as LLM
from template import template
......@@ -61,3 +62,9 @@ def load(model, models_home=".", llms={}):
def unload(model, llms={}):
    """Evict the cached LLM instance for *model* from the shared cache.

    NOTE(review): the mutable default `llms={}` is deliberate here — it acts
    as a module-lifetime cache shared across calls. A no-op if the model is
    not currently loaded.
    """
    llms.pop(model, None)
def add(file, models_home=".", *args, **kwargs):
    """Import a model by moving its file into the local model store.

    Args:
        file: path to the model file to import.
        models_home: destination directory for the model store (default ".").

    Raises:
        ValueError: if *file* does not exist.
    """
    if not os.path.exists(file):
        # BUG FIX: the original message lacked the f prefix and referenced a
        # nonexistent name ({model}), so "{model}" was printed literally.
        # Interpolate the actual path the caller passed.
        raise ValueError(f"Model file {file} not found")
    shutil.move(file, models_home)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment