"examples/dreambooth/train_dreambooth_lora_flux.py" did not exist on "5f150c4cef2140342c66ef44ba8f36b5844a51b1"
Unverified commit 0ddb2b32 authored by Timothy Jaeryang Baek, committed by GitHub

Merge pull request #413 from ollama-webui/main

dev
parents 880f58e8 ed1d9e61
version: '3.8'
services:
ollama:
volumes:
- ${OLLAMA_DATA_DIR-./ollama-data}:/root/.ollama
-version: '3.6'
+version: '3.8'
 services:
   ollama:
@@ -7,7 +7,7 @@ services:
       resources:
         reservations:
           devices:
-            - driver: nvidia
-              count: 1
+            - driver: ${OLLAMA_GPU_DRIVER-nvidia}
+              count: ${OLLAMA_GPU_COUNT-1}
               capabilities:
                 - gpu
-version: '3.6'
+version: '3.8'
 services:
   ollama:
@@ -16,16 +16,16 @@ services:
       args:
         OLLAMA_API_BASE_URL: '/ollama/api'
       dockerfile: Dockerfile
-    image: ollama-webui:latest
+    image: ghcr.io/ollama-webui/ollama-webui:main
     container_name: ollama-webui
     volumes:
       - ollama-webui:/app/backend/data
     depends_on:
       - ollama
     ports:
-      - 3000:8080
+      - ${OLLAMA_WEBUI_PORT-3000}:8080
     environment:
-      - "OLLAMA_API_BASE_URL=http://ollama:11434/api"
+      - 'OLLAMA_API_BASE_URL=http://ollama:11434/api'
     extra_hosts:
       - host.docker.internal:host-gateway
     restart: unless-stopped
...
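The `${VAR-default}` substitutions introduced above fall back to the default when the variable is unset, so the files keep working with no extra configuration. A minimal sketch of a `.env` file placed next to the compose file (the variable names come from the diffs above; the values are purely illustrative):

# .env — illustrative override values
OLLAMA_DATA_DIR=/srv/ollama      # host directory for model data
OLLAMA_GPU_DRIVER=nvidia         # driver name for the device reservation
OLLAMA_GPU_COUNT=2               # reserve two GPUs instead of the default one
OLLAMA_WEBUI_PORT=8080           # publish the web UI on host port 8080

Docker Compose reads `.env` from the project directory automatically; the same variables can also be exported in the shell before running `docker compose up -d`.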
# Project workflow
[![](https://mermaid.ink/img/pako:eNq1k01rAjEQhv_KkFNLFe1N9iAUevFSRVl6Cci4Gd1ANtlmsmtF_O_N7iqtHxR76ClhMu87zwyZvcicIpEIpo-KbEavGjceC2lL9EFnukQbIGXygNye5y9TY7DAZTpZLsjXXVYXg3dapRM4hh9mu5A7-3hTfSXtAtJK21Tsj8dPl3USmJZkGVbebWNKD2rNOjAYl6HJHYdkNBwNpb3U9aNZvzFNYE6h8tFiSyZzBUGJG4K1dwVwTSYQrCptlLRvLt5dA5i2la5Ruk51Ux0VKQjuxPVbAwuyiuFlNgHfzJ5DoxtgqQf1813gnZRLZ5lAYcD7WT1lpGtiQKug9C4jZrrp-Fd-1-Y1bdzo4dvnZDLz7lPHyj8sOgfg4x84E7RTuEaZt8yRZqtDfgT_rwG2u3Dv_ERPFOQL1Cqu2F5aAClCTgVJkcSrojVWJkgh7SGmYhXcYmczkQRfUU9UZfQ4baRI1miYDl_QqlPg?type=png)](https://mermaid.live/edit#pako:eNq1k01rAjEQhv_KkFNLFe1N9iAUevFSRVl6Cci4Gd1ANtlmsmtF_O_N7iqtHxR76ClhMu87zwyZvcicIpEIpo-KbEavGjceC2lL9EFnukQbIGXygNye5y9TY7DAZTpZLsjXXVYXg3dapRM4hh9mu5A7-3hTfSXtAtJK21Tsj8dPl3USmJZkGVbebWNKD2rNOjAYl6HJHYdkNBwNpb3U9aNZvzFNYE6h8tFiSyZzBUGJG4K1dwVwTSYQrCptlLRvLt5dA5i2la5Ruk51Ux0VKQjuxPVbAwuyiuFlNgHfzJ5DoxtgqQf1813gnZRLZ5lAYcD7WT1lpGtiQKug9C4jZrrp-Fd-1-Y1bdzo4dvnZDLz7lPHyj8sOgfg4x84E7RTuEaZt8yRZqtDfgT_rwG2u3Dv_ERPFOQL1Cqu2F5aAClCTgVJkcSrojVWJkgh7SGmYhXcYmczkQRfUU9UZfQ4baRI1miYDl_QqlPg)
# If you're serving both the frontend and backend (Recommended)
# Set the public API base URL for seamless communication
PUBLIC_API_BASE_URL='/ollama/api'
# If you're serving only the frontend (not recommended and not fully supported)
# Comment out the line above and uncomment the line below
# You can keep the default value or specify a custom path, e.g. '/api'
# PUBLIC_API_BASE_URL='http://{location.hostname}:11434/api'
# Ollama URL for the backend to connect
# The path '/ollama/api' will be redirected to the specified backend URL
OLLAMA_API_BASE_URL='http://localhost:11434/api'
OPENAI_API_BASE_URL=''
OPENAI_API_KEY=''
apiVersion: v2
name: ollama-webui
description: "Ollama Web UI: A User-Friendly Web Interface for Chat Interactions 👋"
version: 1.0.0
icon: https://raw.githubusercontent.com/ollama-webui/ollama-webui/main/static/favicon.png
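Given this chart metadata, the templates that follow install with standard Helm commands; a minimal sketch, assuming the chart directory sits at ./kubernetes/helm (the path is an assumption, not stated in the chart):

# render the manifests locally to inspect them before installing
helm template ollama-webui ./kubernetes/helm
# install with the defaults from values.yaml (namespace: ollama-namespace)
helm install ollama-webui ./kubernetes/helm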
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Values.namespace }}
apiVersion: v1
kind: Service
metadata:
name: ollama-service
namespace: {{ .Values.namespace }}
spec:
type: {{ .Values.ollama.service.type }}
selector:
app: ollama
ports:
- protocol: TCP
port: {{ .Values.ollama.servicePort }}
targetPort: {{ .Values.ollama.servicePort }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ollama
namespace: {{ .Values.namespace }}
spec:
serviceName: "ollama"
replicas: {{ .Values.ollama.replicaCount }}
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
containers:
- name: ollama
image: {{ .Values.ollama.image }}
ports:
- containerPort: {{ .Values.ollama.servicePort }}
env:
{{- if .Values.ollama.gpu.enabled }}
- name: PATH
value: /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
- name: LD_LIBRARY_PATH
value: /usr/local/nvidia/lib:/usr/local/nvidia/lib64
- name: NVIDIA_DRIVER_CAPABILITIES
value: compute,utility
{{- end }}
{{- if .Values.ollama.resources }}
resources: {{- toYaml .Values.ollama.resources | nindent 10 }}
{{- end }}
volumeMounts:
- name: ollama-volume
mountPath: /root/.ollama
tty: true
{{- with .Values.ollama.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
tolerations:
{{- if .Values.ollama.gpu.enabled }}
- key: nvidia.com/gpu
operator: Exists
effect: NoSchedule
{{- end }}
volumeClaimTemplates:
- metadata:
name: ollama-volume
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: {{ .Values.ollama.volumeSize }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama-webui-deployment
namespace: {{ .Values.namespace }}
spec:
replicas: 1
selector:
matchLabels:
app: ollama-webui
template:
metadata:
labels:
app: ollama-webui
spec:
containers:
- name: ollama-webui
image: {{ .Values.webui.image }}
ports:
- containerPort: 8080
{{- if .Values.webui.resources }}
resources: {{- toYaml .Values.webui.resources | nindent 10 }}
{{- end }}
volumeMounts:
- name: webui-volume
mountPath: /app/backend/data
env:
- name: OLLAMA_API_BASE_URL
value: "http://ollama-service.{{ .Values.namespace }}.svc.cluster.local:{{ .Values.ollama.servicePort }}/api"
tty: true
{{- with .Values.webui.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: webui-volume
persistentVolumeClaim:
claimName: ollama-webui-pvc
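The OLLAMA_API_BASE_URL above points at the ollama Service through in-cluster DNS. One way to verify connectivity from the running web UI pod, assuming the default namespace value and that curl exists in the image (neither is guaranteed):

# call Ollama's /api/tags endpoint from inside the webui deployment
kubectl -n ollama-namespace exec deploy/ollama-webui-deployment -- \
  curl -s http://ollama-service.ollama-namespace.svc.cluster.local:11434/api/tags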
{{- if .Values.webui.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ollama-webui-ingress
namespace: {{ .Values.namespace }}
{{- if .Values.webui.ingress.annotations }}
annotations:
{{ toYaml .Values.webui.ingress.annotations | trimSuffix "\n" | indent 4 }}
{{- end }}
spec:
rules:
- host: {{ .Values.webui.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ollama-webui-service
port:
number: {{ .Values.webui.servicePort }}
{{- end }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app: ollama-webui
name: ollama-webui-pvc
namespace: {{ .Values.namespace }}
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: {{ .Values.webui.volumeSize }}
apiVersion: v1
kind: Service
metadata:
name: ollama-webui-service
namespace: {{ .Values.namespace }}
spec:
type: {{ .Values.webui.service.type }} # Default: NodePort; use LoadBalancer if your cloud supports it
selector:
app: ollama-webui
ports:
- protocol: TCP
port: {{ .Values.webui.servicePort }}
targetPort: {{ .Values.webui.servicePort }}
# If using NodePort, you can optionally specify the nodePort:
# nodePort: 30000
namespace: ollama-namespace
ollama:
replicaCount: 1
image: ollama/ollama:latest
servicePort: 11434
resources:
limits:
cpu: "2000m"
memory: "2Gi"
nvidia.com/gpu: "0"
volumeSize: 1Gi
nodeSelector: {}
tolerations: []
service:
type: ClusterIP
gpu:
enabled: false
webui:
replicaCount: 1
image: ghcr.io/ollama-webui/ollama-webui:main
servicePort: 8080
resources:
limits:
cpu: "500m"
memory: "500Mi"
ingress:
enabled: true
annotations:
# Use appropriate annotations for your Ingress controller, e.g., for NGINX:
# nginx.ingress.kubernetes.io/rewrite-target: /
host: ollama.minikube.local
volumeSize: 1Gi
nodeSelector: {}
tolerations: []
service:
type: NodePort
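Any of these defaults can be overridden at install time; a sketch of two common cases, again assuming a local chart path of ./kubernetes/helm:

# enable the NVIDIA env vars and the GPU toleration in the ollama StatefulSet
helm install ollama-webui ./kubernetes/helm --set ollama.gpu.enabled=true
# or collect overrides in a file (file name is illustrative)
helm install ollama-webui ./kubernetes/helm -f my-values.yaml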
apiVersion: v1
kind: Namespace
metadata:
name: ollama-namespace
apiVersion: v1
kind: Service
metadata:
name: ollama-service
namespace: ollama-namespace
spec:
selector:
app: ollama
ports:
- protocol: TCP
port: 11434
targetPort: 11434
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ollama
namespace: ollama-namespace
spec:
serviceName: "ollama"
replicas: 1
selector:
matchLabels:
app: ollama
template:
metadata:
labels:
app: ollama
spec:
containers:
- name: ollama
image: ollama/ollama:latest
ports:
- containerPort: 11434
resources:
limits:
cpu: "2000m"
memory: "2Gi"
volumeMounts:
- name: ollama-volume
mountPath: /root/.ollama
tty: true
volumeClaimTemplates:
- metadata:
name: ollama-volume
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
apiVersion: apps/v1
kind: Deployment
metadata:
name: ollama-webui-deployment
namespace: ollama-namespace
spec:
replicas: 1
selector:
matchLabels:
app: ollama-webui
template:
metadata:
labels:
app: ollama-webui
spec:
containers:
- name: ollama-webui
image: ghcr.io/ollama-webui/ollama-webui:main
ports:
- containerPort: 8080
resources:
limits:
cpu: "500m"
memory: "500Mi"
env:
- name: OLLAMA_API_BASE_URL
value: "http://ollama-service.ollama-namespace.svc.cluster.local:11434/api"
tty: true
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ollama-webui-ingress
namespace: ollama-namespace
#annotations:
# Use appropriate annotations for your Ingress controller, e.g., for NGINX:
# nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: ollama.minikube.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ollama-webui-service
port:
number: 8080
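For the plain manifests above (no Helm), create the Namespace first so the namespaced resources have somewhere to land; a sketch with hypothetical file names, run from the manifest directory:

# apply the Namespace, then everything else in the directory
kubectl apply -f namespace.yaml
kubectl apply -f .
# with the ingress host used above, map it to the minikube IP locally
echo "$(minikube ip) ollama.minikube.local" | sudo tee -a /etc/hosts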