Commit 8ea5e5e1 authored by Bruce MacDonald, committed by Jeffrey Morgan

separate routes

parent fd962a36
...
@@ -14,54 +14,63 @@ import (
 	"github.com/jmorganca/ollama/api"
 )
 
-func Serve(ln net.Listener) error {
-	r := gin.Default()
+func pull(c *gin.Context) {
+	// TODO
+	c.JSON(http.StatusOK, gin.H{"message": "ok"})
+}
 
+func generate(c *gin.Context) {
 	// TODO: these should be request parameters
 	gpulayers := 0
 	tokens := 512
 	threads := runtime.NumCPU()
 
-	r.POST("/api/generate", func(c *gin.Context) {
-		// TODO: set prompt from template
-		fmt.Println("Generating text...")
+	// TODO: set prompt from template
+	fmt.Println("Generating text...")
 
-		var req api.GenerateRequest
-		if err := c.ShouldBindJSON(&req); err != nil {
-			c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
-			return
-		}
+	var req api.GenerateRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
+		return
+	}
 
-		fmt.Println(req)
+	fmt.Println(req)
 
-		l, err := llama.New(req.Model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
-		if err != nil {
-			fmt.Println("Loading the model failed:", err.Error())
-			return
-		}
+	l, err := llama.New(req.Model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
+	if err != nil {
+		fmt.Println("Loading the model failed:", err.Error())
+		return
+	}
 
-		ch := make(chan string)
+	ch := make(chan string)
 
-		go func() {
-			defer close(ch)
-			_, err := l.Predict(req.Prompt, llama.Debug, llama.SetTokenCallback(func(token string) bool {
-				ch <- token
-				return true
-			}), llama.SetTokens(tokens), llama.SetThreads(threads), llama.SetTopK(90), llama.SetTopP(0.86), llama.SetStopWords("llama"))
-			if err != nil {
-				panic(err)
-			}
-		}()
+	go func() {
+		defer close(ch)
+		_, err := l.Predict(req.Prompt, llama.Debug, llama.SetTokenCallback(func(token string) bool {
+			ch <- token
+			return true
+		}), llama.SetTokens(tokens), llama.SetThreads(threads), llama.SetTopK(90), llama.SetTopP(0.86), llama.SetStopWords("llama"))
+		if err != nil {
+			panic(err)
+		}
+	}()
 
-		c.Stream(func(w io.Writer) bool {
-			tok, ok := <-ch
-			if !ok {
-				return false
-			}
-			c.SSEvent("token", tok)
-			return true
-		})
+	c.Stream(func(w io.Writer) bool {
+		tok, ok := <-ch
+		if !ok {
+			return false
+		}
+		c.SSEvent("token", tok)
+		return true
 	})
+}
+
+func Serve(ln net.Listener) error {
+	r := gin.Default()
+
+	r.POST("api/pull", pull)
+	r.POST("/api/generate", generate)
 
 	log.Printf("Listening on %s", ln.Addr())
 	s := &http.Server{
...
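For context, here is a minimal client sketch showing how the new streaming endpoint can be consumed; it is not part of this commit. It assumes the server listens on localhost:8080 (Serve binds whatever listener it is given), that api.GenerateRequest marshals its Model and Prompt fields with lowercase JSON tags, and the model path and prompt are placeholders.

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Request body mirroring the handler's api.GenerateRequest fields
	// (Model, Prompt); the lowercase JSON keys are an assumption.
	body, err := json.Marshal(map[string]string{
		"model":  "./models/model.bin", // hypothetical model path
		"prompt": "Why is the sky blue?",
	})
	if err != nil {
		panic(err)
	}

	// Address is an assumption; adjust to wherever the listener binds.
	resp, err := http.Post("http://localhost:8080/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// c.SSEvent writes frames like "event:token\ndata:<token>\n\n";
	// print the payload of each data line as it arrives.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "data:") {
			fmt.Print(strings.TrimSpace(strings.TrimPrefix(line, "data:")))
		}
	}
	fmt.Println()
}

The server-side pattern this exercises: a goroutine feeds generated tokens into a channel and closes it when Predict returns, while c.Stream pulls from the channel and emits one SSE event per token, decoupling generation from the HTTP write loop.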