"tools/python/vscode:/vscode.git/clone" did not exist on "ced9f6f40782eafe411a08dc906bf6a6cd904a75"
Commit 8fa91332 authored by Jeffrey Morgan's avatar Jeffrey Morgan
Browse files

initial commit

parents
// Electron Forge build configuration for the desktop app.
// Declares how the app is packaged and which installers ("makers") are built.
module.exports = {
packagerConfig: {
// Bundle app sources into an ASAR archive (faster startup, single file).
asar: true,
},
rebuildConfig: {},
// One maker per target platform/installer format.
makers: [
{
// Windows installer (Squirrel.Windows).
name: '@electron-forge/maker-squirrel',
config: {},
},
{
// Plain .zip build, macOS only.
name: '@electron-forge/maker-zip',
platforms: ['darwin'],
},
{
// Debian/Ubuntu .deb package.
name: '@electron-forge/maker-deb',
config: {},
},
{
// Fedora/RHEL .rpm package.
name: '@electron-forge/maker-rpm',
config: {},
},
],
plugins: [
{
// Unpacks native Node modules out of the ASAR so they can be loaded.
name: '@electron-forge/plugin-auto-unpack-natives',
config: {},
},
],
}
{
"name": "desktop",
"productName": "desktop",
"version": "1.0.0",
"description": "My Electron application description",
"main": "src/index.js",
"scripts": {
"start": "electron-forge start",
"package": "electron-forge package",
"make": "electron-forge make",
"publish": "electron-forge publish",
"lint": "echo \"No linting configured\""
},
"keywords": [],
"author": {
"name": "Jeffrey Morgan",
"email": "jeff@keypair.com"
},
"license": "MIT",
"dependencies": {
"electron-squirrel-startup": "^1.0.0"
},
"devDependencies": {
"@electron-forge/cli": "^6.2.1",
"@electron-forge/maker-deb": "^6.2.1",
"@electron-forge/maker-rpm": "^6.2.1",
"@electron-forge/maker-squirrel": "^6.2.1",
"@electron-forge/maker-zip": "^6.2.1",
"@electron-forge/plugin-auto-unpack-natives": "^6.2.1",
"electron": "25.1.1"
}
}
// Electron main-process entry point.
const { app, BrowserWindow } = require('electron')
// NOTE(review): `path` is required but not used anywhere in this file yet.
const path = require('path')
// Handle creating/removing shortcuts on Windows when installing/uninstalling.
// electron-squirrel-startup returns true while Squirrel is processing an
// install/update event; quit immediately so the installer can do its work.
if (require('electron-squirrel-startup')) {
app.quit()
}
// Create the application's main window and point it at the local dev server.
const createWindow = () => {
  const win = new BrowserWindow({
    width: 800,
    height: 600,
    // Frameless-style window with macOS traffic lights repositioned.
    titleBarStyle: 'hidden',
    trafficLightPosition: { x: 20, y: 18 },
    vibrancy: 'titlebar',
    transparent: true,
  })

  // Load the UI served by the local development server.
  win.loadURL('http://localhost:3000')

  // Open the DevTools for debugging.
  win.webContents.openDevTools()
}
// This method will be called when Electron has finished
// initialization and is ready to create browser windows.
// Some APIs can only be used after this event occurs.
app.on('ready', createWindow)
// Quit when all windows are closed, except on macOS. There, it's common
// for applications and their menu bar to stay active until the user quits
// explicitly with Cmd + Q.
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') {
app.quit()
}
})
// Fired when the app is re-activated (e.g. the dock icon is clicked).
app.on('activate', () => {
// On OS X it's common to re-create a window in the app when the
// dock icon is clicked and there are no other windows open.
if (BrowserWindow.getAllWindows().length === 0) {
createWindow()
}
})
// In this file you can include the rest of your app's specific main process
// code. You can also put them in separate files and import them here.
#!/bin/bash
# Dev runner: starts the web client, the Electron shell, and the Go server
# concurrently, and tears all three down together on Ctrl+C.

# Kill every child process when the user interrupts. Errors are suppressed
# because a child may already have exited on its own.
handle_sigint() {
  kill $pid1 $pid2 $pid3 2>/dev/null
  exit
}

# Trap Ctrl+C signal
trap 'handle_sigint' SIGINT

# Start the three processes in the background, remembering their PIDs.
npm run dev --prefix ./client & pid1=$!
npm start --prefix ./desktop & pid2=$!
go run -C ./server . & pid3=$!

# Wait for all processes to finish
wait
# Install JavaScript dependencies for the web client and the Electron shell.
npm install --prefix ./client
npm install --prefix ./desktop
# Build the Go server. cgo needs the vendored go-llama.cpp checkout for its
# static library (LIBRARY_PATH) and headers (C_INCLUDE_PATH).
LIBRARY_PATH=$PWD/go-llama.cpp C_INCLUDE_PATH=$PWD/go-llama.cpp go build .
// Module definition for the keypair Go server.
module github.com/keypairdev/keypair
go 1.20
require (
github.com/go-skynet/go-llama.cpp v0.0.0-20230620192753-7a36befaece1 // local llama.cpp inference bindings
github.com/sashabaranov/go-openai v1.11.3 // OpenAI API client
)
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-skynet/go-llama.cpp v0.0.0-20230620192753-7a36befaece1 h1:UQ8y3kHxBgh3BnaW06y/X97fEN48yHPwWobMz8/aztU=
github.com/go-skynet/go-llama.cpp v0.0.0-20230620192753-7a36befaece1/go.mod h1:tzi97YvT1bVQ+iTG39LvpDkKG1WbizgtljC+orSoM40=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
github.com/sashabaranov/go-openai v1.11.3 h1:bvwWF8hj4UhPlswBdL9/IfOpaHXfzGCJO8WY8ml9sGc=
github.com/sashabaranov/go-openai v1.11.3/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
package main
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"os"
"runtime"
"github.com/sashabaranov/go-openai"
llama "github.com/go-skynet/go-llama.cpp"
)
// Model is the interface implemented by every language-model backend this
// server can expose over HTTP.
type Model interface {
// Name returns a human-readable name for the model.
Name() string
// Handler serves a generation request: implementations read the prompt
// from the request body and write the model's output to w.
Handler(w http.ResponseWriter, r *http.Request)
}
// LLama7B is a Model backed by a locally loaded llama.cpp 7B model.
type LLama7B struct {
// llama is the underlying go-llama.cpp model handle.
llama *llama.LLama
}
// NewLLama7B loads the quantized 7B weights from disk and returns a ready
// LLama7B. The process exits if the model file cannot be loaded.
func NewLLama7B() *LLama7B {
	// Name the local differently from the imported llama package to avoid
	// shadowing it inside this function.
	model, err := llama.New("./models/7B/ggml-model-q4_0.bin", llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(128))
	if err != nil {
		fmt.Println("Loading the model failed:", err.Error())
		os.Exit(1)
	}
	return &LLama7B{llama: model}
}
// Name reports the human-readable name of this model.
func (l *LLama7B) Name() string {
	const displayName = "LLaMA 7B"
	return displayName
}
// Handler serves a generation request against the local LLaMA model. It reads
// the prompt from the request body, streams predicted tokens to the client as
// they are produced, and logs the prompt's embeddings.
// Receiver renamed m -> l for consistency with Name.
func (l *LLama7B) Handler(w http.ResponseWriter, r *http.Request) {
	// Streaming headers must be set BEFORE the first body write; the original
	// code set them after streaming, where they are silently dropped.
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")

	var prompt bytes.Buffer
	if _, err := io.Copy(&prompt, r.Body); err != nil {
		http.Error(w, "reading request body: "+err.Error(), http.StatusBadRequest)
		return
	}

	// Stream each predicted token straight to the client via the callback.
	_, err := l.llama.Predict(prompt.String(), llama.Debug, llama.SetTokenCallback(func(token string) bool {
		w.Write([]byte(token))
		return true
	}), llama.SetTokens(512), llama.SetThreads(runtime.NumCPU()), llama.SetTopK(90), llama.SetTopP(0.86), llama.SetStopWords("llama"))
	if err != nil {
		// Log and abort this request only; os.Exit here previously killed the
		// whole server on a single failed prediction.
		fmt.Println("Predict failed:", err.Error())
		return
	}

	embeds, err := l.llama.Embeddings(prompt.String())
	if err != nil {
		fmt.Printf("Embeddings: error %s \n", err.Error())
		return
	}
	fmt.Printf("Embeddings: %v", embeds)
}
// GPT4 is a Model backed by the OpenAI API.
type GPT4 struct {
// apiKey is the OpenAI API key used to authenticate requests.
apiKey string
}
// Name reports the human-readable name of this model backend.
func (g *GPT4) Name() string {
	const displayName = "OpenAI GPT-4"
	return displayName
}
// Handler serves a chat request via the OpenAI API. It forwards the request
// body as the user message and writes the completion back to the client.
func (g *GPT4) Handler(w http.ResponseWriter, r *http.Request) {
	// Read the prompt from the request body (previously ignored in favor of a
	// hard-coded "Hello!").
	var prompt bytes.Buffer
	if _, err := io.Copy(&prompt, r.Body); err != nil {
		http.Error(w, "reading request body: "+err.Error(), http.StatusBadRequest)
		return
	}

	// Authenticate with the configured key instead of the "your token"
	// placeholder the original code shipped with.
	client := openai.NewClient(g.apiKey)
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			// This type is named GPT4 and Name() reports "OpenAI GPT-4", but
			// the original request asked for gpt-3.5-turbo; request GPT-4.
			Model: openai.GPT4,
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    openai.ChatMessageRoleUser,
					Content: prompt.String(),
				},
			},
		},
	)
	if err != nil {
		fmt.Printf("chat completion error: %v\n", err)
		http.Error(w, "chat completion failed", http.StatusBadGateway)
		return
	}

	// Set headers before WriteHeader, and write the status exactly once; the
	// original called WriteHeader twice and set Content-Type after the first
	// call, where it is silently dropped.
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	// Send the completion to the client instead of only printing it to stdout.
	w.Write([]byte(resp.Choices[0].Message.Content))
}
// TODO: add subcommands to spawn different models
func main() {
model := &LLama7B{}
http.HandleFunc("/generate", model.Handler)
fmt.Println("Starting server on :8080")
if err := http.ListenAndServe(":8080", nil); err != nil {
fmt.Printf("Error starting server: %s\n", err)
return
}
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment