Unverified commit ee155937, authored Jan 01, 2024 by Timothy Jaeryang Baek; committed by GitHub on Jan 01, 2024.

Merge branch 'main' into rag

Parents: b16f0ea2, 6594661d
Changes: 41 files. Showing 20 changed files with 600 additions and 1 deletion (+600, -1).
kubernetes/helm/templates/ollama-namespace.yaml (+4, -0)
kubernetes/helm/templates/ollama-service.yaml (+13, -0)
kubernetes/helm/templates/ollama-statefulset.yaml (+55, -0)
kubernetes/helm/templates/webui-deployment.yaml (+38, -0)
kubernetes/helm/templates/webui-ingress.yaml (+23, -0)
kubernetes/helm/templates/webui-pvc.yaml (+12, -0)
kubernetes/helm/templates/webui-service.yaml (+15, -0)
kubernetes/helm/values.yaml (+38, -0)
kubernetes/manifest/base/ollama-namespace.yaml (+4, -0)
kubernetes/manifest/base/ollama-service.yaml (+12, -0)
kubernetes/manifest/base/ollama-statefulset.yaml (+37, -0)
kubernetes/manifest/base/webui-deployment.yaml (+28, -0)
kubernetes/manifest/base/webui-ingress.yaml (+20, -0)
kubernetes/manifest/base/webui-service.yaml (+15, -0)
kubernetes/manifest/kustomization.yaml (+12, -0)
kubernetes/manifest/patches/ollama-statefulset-gpu.yaml (+17, -0)
run-compose.sh (+237, -0)
src/app.css (+1, -1)
src/lib/components/chat/MessageInput.svelte (+18, -0)
src/lib/components/chat/Messages/ResponseMessage.svelte (+1, -0)
kubernetes/helm/templates/ollama-namespace.yaml (new file, mode 100644)

apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Values.namespace }}
kubernetes/helm/templates/ollama-service.yaml (new file, mode 100644)

apiVersion: v1
kind: Service
metadata:
  name: ollama-service
  namespace: {{ .Values.namespace }}
spec:
  type: {{ .Values.ollama.service.type }}
  selector:
    app: ollama
  ports:
    - protocol: TCP
      port: {{ .Values.ollama.servicePort }}
      targetPort: {{ .Values.ollama.servicePort }}
kubernetes/helm/templates/ollama-statefulset.yaml (new file, mode 100644)

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ollama
  namespace: {{ .Values.namespace }}
spec:
  serviceName: "ollama"
  replicas: {{ .Values.ollama.replicaCount }}
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      containers:
      - name: ollama
        image: {{ .Values.ollama.image }}
        ports:
        - containerPort: {{ .Values.ollama.servicePort }}
        env:
        {{- if .Values.ollama.gpu.enabled }}
        - name: PATH
          value: /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
        - name: LD_LIBRARY_PATH
          value: /usr/local/nvidia/lib:/usr/local/nvidia/lib64
        - name: NVIDIA_DRIVER_CAPABILITIES
          value: compute,utility
        {{- end }}
        {{- if .Values.ollama.resources }}
        resources:
          {{- toYaml .Values.ollama.resources | nindent 10 }}
        {{- end }}
        volumeMounts:
        - name: ollama-volume
          mountPath: /root/.ollama
        tty: true
      {{- with .Values.ollama.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      tolerations:
      {{- if .Values.ollama.gpu.enabled }}
      - key: nvidia.com/gpu
        operator: Exists
        effect: NoSchedule
      {{- end }}
  volumeClaimTemplates:
  - metadata:
      name: ollama-volume
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: {{ .Values.ollama.volumeSize }}
kubernetes/helm/templates/webui-deployment.yaml (new file, mode 100644)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: ollama-webui-deployment
  namespace: {{ .Values.namespace }}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ollama-webui
  template:
    metadata:
      labels:
        app: ollama-webui
    spec:
      containers:
      - name: ollama-webui
        image: {{ .Values.webui.image }}
        ports:
        - containerPort: 8080
        {{- if .Values.webui.resources }}
        resources:
          {{- toYaml .Values.webui.resources | nindent 10 }}
        {{- end }}
        volumeMounts:
        - name: webui-volume
          mountPath: /app/backend/data
        env:
        - name: OLLAMA_API_BASE_URL
          value: "http://ollama-service.{{ .Values.namespace }}.svc.cluster.local:{{ .Values.ollama.servicePort }}/api"
        tty: true
      {{- with .Values.webui.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
      - name: webui-volume
        persistentVolumeClaim:
          claimName: ollama-webui-pvc
kubernetes/helm/templates/webui-ingress.yaml (new file, mode 100644)

{{- if .Values.webui.ingress.enabled }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ollama-webui-ingress
  namespace: {{ .Values.namespace }}
  {{- if .Values.webui.ingress.annotations }}
  annotations:
    {{ toYaml .Values.webui.ingress.annotations | trimSuffix "\n" | indent 4 }}
  {{- end }}
spec:
  rules:
  - host: {{ .Values.webui.ingress.host }}
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: ollama-webui-service
            port:
              number: {{ .Values.webui.servicePort }}
{{- end }}
kubernetes/helm/templates/webui-pvc.yaml (new file, mode 100644)

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    app: ollama-webui
  name: ollama-webui-pvc
  namespace: {{ .Values.namespace }}
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: {{ .Values.webui.volumeSize }}
kubernetes/helm/templates/webui-service.yaml (new file, mode 100644)

apiVersion: v1
kind: Service
metadata:
  name: ollama-webui-service
  namespace: {{ .Values.namespace }}
spec:
  type: {{ .Values.webui.service.type }} # Default: NodePort # Use LoadBalancer if you're on a cloud that supports it
  selector:
    app: ollama-webui
  ports:
    - protocol: TCP
      port: {{ .Values.webui.servicePort }}
      targetPort: {{ .Values.webui.servicePort }}
      # If using NodePort, you can optionally specify the nodePort:
      # nodePort: 30000
kubernetes/helm/values.yaml (new file, mode 100644)

namespace: ollama-namespace

ollama:
  replicaCount: 1
  image: ollama/ollama:latest
  servicePort: 11434
  resources:
    limits:
      cpu: "2000m"
      memory: "2Gi"
      nvidia.com/gpu: "0"
  volumeSize: 1Gi
  nodeSelector: {}
  tolerations: []
  service:
    type: ClusterIP
  gpu:
    enabled: false

webui:
  replicaCount: 1
  image: ghcr.io/ollama-webui/ollama-webui:main
  servicePort: 8080
  resources:
    limits:
      cpu: "500m"
      memory: "500Mi"
  ingress:
    enabled: true
    annotations:
      # Use appropriate annotations for your Ingress controller, e.g., for NGINX:
      # nginx.ingress.kubernetes.io/rewrite-target: /
    host: ollama.minikube.local
  volumeSize: 1Gi
  nodeSelector: {}
  tolerations: []
  service:
    type: NodePort
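With these defaults in place, the chart can be installed straight from a repository checkout. A minimal sketch, assuming Helm 3 and that kubernetes/helm contains a complete chart (a Chart.yaml is not part of this diff); the release name ollama-webui is illustrative:

# Install or upgrade the release; any value above can be overridden per environment
helm upgrade --install ollama-webui ./kubernetes/helm \
  --set ollama.gpu.enabled=true \
  --set webui.ingress.host=ollama.minikube.local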
kubernetes/manifest/base/ollama-namespace.yaml (new file, mode 100644)

apiVersion: v1
kind: Namespace
metadata:
  name: ollama-namespace
kubernetes/manifest/base/ollama-service.yaml (new file, mode 100644)

apiVersion: v1
kind: Service
metadata:
  name: ollama-service
  namespace: ollama-namespace
spec:
  selector:
    app: ollama
  ports:
    - protocol: TCP
      port: 11434
      targetPort: 11434
kubernetes/manifest/base/ollama-statefulset.yaml (new file, mode 100644)

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ollama
  namespace: ollama-namespace
spec:
  serviceName: "ollama"
  replicas: 1
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      containers:
      - name: ollama
        image: ollama/ollama:latest
        ports:
        - containerPort: 11434
        resources:
          limits:
            cpu: "2000m"
            memory: "2Gi"
        volumeMounts:
        - name: ollama-volume
          mountPath: /root/.ollama
        tty: true
  volumeClaimTemplates:
  - metadata:
      name: ollama-volume
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
kubernetes/manifest/base/webui-deployment.yaml (new file, mode 100644)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: ollama-webui-deployment
  namespace: ollama-namespace
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ollama-webui
  template:
    metadata:
      labels:
        app: ollama-webui
    spec:
      containers:
      - name: ollama-webui
        image: ghcr.io/ollama-webui/ollama-webui:main
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: "500m"
            memory: "500Mi"
        env:
        - name: OLLAMA_API_BASE_URL
          value: "http://ollama-service.ollama-namespace.svc.cluster.local:11434/api"
        tty: true
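The OLLAMA_API_BASE_URL value can be sanity-checked from inside the cluster. A sketch, assuming kubectl access and that Ollama exposes its usual /api/tags endpoint; the pod name curl-test is illustrative:

# Run a throwaway curl pod in the same namespace and hit the service DNS name
kubectl run curl-test -n ollama-namespace --rm -it --restart=Never \
  --image=curlimages/curl -- \
  curl -s http://ollama-service.ollama-namespace.svc.cluster.local:11434/api/tags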
kubernetes/manifest/base/webui-ingress.yaml (new file, mode 100644)

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ollama-webui-ingress
  namespace: ollama-namespace
  #annotations:
    # Use appropriate annotations for your Ingress controller, e.g., for NGINX:
    # nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - host: ollama.minikube.local
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: ollama-webui-service
            port:
              number: 8080
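For the ollama.minikube.local host above to resolve, the cluster needs an ingress controller and the workstation needs a hosts entry. A sketch assuming a minikube cluster:

# Enable the NGINX ingress controller bundled with minikube
minikube addons enable ingress
# Point the ingress host at the minikube node IP
echo "$(minikube ip) ollama.minikube.local" | sudo tee -a /etc/hosts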
kubernetes/manifest/base/webui-service.yaml (new file, mode 100644)

apiVersion: v1
kind: Service
metadata:
  name: ollama-webui-service
  namespace: ollama-namespace
spec:
  type: NodePort  # Use LoadBalancer if you're on a cloud that supports it
  selector:
    app: ollama-webui
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
      # If using NodePort, you can optionally specify the nodePort:
      # nodePort: 30000
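Without the ingress, the NodePort service can be reached directly. A sketch assuming minikube; on other clusters, use any node IP plus the allocated node port:

# Print a reachable URL for the web UI service
minikube service ollama-webui-service -n ollama-namespace --url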
kubernetes/manifest/kustomization.yaml (new file, mode 100644)

resources:
- base/ollama-namespace.yaml
- base/ollama-service.yaml
- base/ollama-statefulset.yaml
- base/webui-deployment.yaml
- base/webui-service.yaml
- base/webui-ingress.yaml

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

patches:
- path: patches/ollama-statefulset-gpu.yaml
kubernetes/manifest/patches/ollama-statefulset-gpu.yaml (new file, mode 100644)

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ollama
  namespace: ollama-namespace
spec:
  selector:
    matchLabels:
      app: ollama
  serviceName: "ollama"
  template:
    spec:
      containers:
      - name: ollama
        resources:
          limits:
            nvidia.com/gpu: "1"
run-compose.sh (new file, mode 100755)

#!/bin/bash

# Define color and formatting codes
BOLD='\033[1m'
GREEN='\033[1;32m'
WHITE='\033[1;37m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Unicode character for tick mark
TICK='\u2713'

# Detect GPU driver
get_gpu_driver() {
    # Detect NVIDIA GPUs
    if lspci | grep -i nvidia > /dev/null; then
        echo "nvidia"
        return
    fi

    # Detect AMD GPUs (including GCN architecture check for amdgpu vs radeon)
    if lspci | grep -i amd > /dev/null; then
        # List of known GCN and later architecture cards
        # This is a simplified list, and in a real-world scenario, you'd want a more comprehensive one
        local gcn_and_later=("Radeon HD 7000" "Radeon HD 8000" "Radeon R5" "Radeon R7" "Radeon R9" "Radeon RX")

        # Get GPU information
        local gpu_info=$(lspci | grep -i 'vga.*amd')

        for model in "${gcn_and_later[@]}"; do
            if echo "$gpu_info" | grep -iq "$model"; then
                echo "amdgpu"
                return
            fi
        done

        # Default to radeon if no GCN or later architecture is detected
        echo "radeon"
        return
    fi

    # Detect Intel GPUs
    if lspci | grep -i intel > /dev/null; then
        echo "i915"
        return
    fi

    # If no known GPU is detected
    echo "Unknown or unsupported GPU driver"
    exit 1
}

# Function for rolling animation
show_loading() {
    local spin='-\|/'
    local i=0

    printf " "

    while kill -0 $1 2>/dev/null; do
        i=$(( (i+1) % 4 ))
        printf "\b${spin:$i:1}"
        sleep .1
    done

    # Replace the spinner with a tick
    printf "\b${GREEN}${TICK}${NC}"
}

# Usage information
usage() {
    echo "Usage: $0 [OPTIONS]"
    echo "Options:"
    echo "  --enable-gpu[count=COUNT]  Enable GPU support with the specified count."
    echo "  --enable-api[port=PORT]    Enable API and expose it on the specified port."
    echo "  --webui[port=PORT]         Set the port for the web user interface."
    echo "  --data[folder=PATH]        Bind mount for ollama data folder (by default will create the 'ollama' volume)."
    echo "  --build                    Build the docker image before running the compose project."
    echo "  --drop                     Drop the compose project."
    echo "  -q, --quiet                Run script in headless mode."
    echo "  -h, --help                 Show this help message."
    echo ""
    echo "Examples:"
    echo "  $0 --drop"
    echo "  $0 --enable-gpu[count=1]"
    echo "  $0 --enable-api[port=11435]"
    echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000]"
    echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000] --data[folder=./ollama-data]"
    echo "  $0 --enable-gpu[count=1] --enable-api[port=12345] --webui[port=3000] --data[folder=./ollama-data] --build"
    echo ""
    echo "This script configures and runs a docker-compose setup with optional GPU support, API exposure, and web UI configuration."
    echo "The GPU to use is detected automatically with the 'lspci' command."
    echo "In this case the GPU detected is: $(get_gpu_driver)"
}

# Default values
gpu_count=1
api_port=11435
webui_port=3000
headless=false
build_image=false
kill_compose=false

# Function to extract value from the parameter
extract_value() {
    echo "$1" | sed -E 's/.*\[.*=(.*)\].*/\1/; t; s/.*//'
}

# Parse arguments
while [[ $# -gt 0 ]]; do
    key="$1"

    case $key in
        --enable-gpu*)
            enable_gpu=true
            value=$(extract_value "$key")
            gpu_count=${value:-1}
            ;;
        --enable-api*)
            enable_api=true
            value=$(extract_value "$key")
            api_port=${value:-11435}
            ;;
        --webui*)
            value=$(extract_value "$key")
            webui_port=${value:-3000}
            ;;
        --data*)
            value=$(extract_value "$key")
            data_dir=${value:-"./ollama-data"}
            ;;
        --drop)
            kill_compose=true
            ;;
        --build)
            build_image=true
            ;;
        -q|--quiet)
            headless=true
            ;;
        -h|--help)
            usage
            exit
            ;;
        *)
            # Unknown option
            echo "Unknown option: $key"
            usage
            exit 1
            ;;
    esac
    shift # past argument or value
done

if [[ $kill_compose == true ]]; then
    docker compose down --remove-orphans
    echo -e "${GREEN}${BOLD}Compose project dropped successfully.${NC}"
    exit
else
    DEFAULT_COMPOSE_COMMAND="docker compose -f docker-compose.yaml"
    if [[ $enable_gpu == true ]]; then
        # Validate and process command-line arguments
        if [[ -n $gpu_count ]]; then
            if ! [[ $gpu_count =~ ^[0-9]+$ ]]; then
                echo "Invalid GPU count: $gpu_count"
                exit 1
            fi
            echo "Enabling GPU with $gpu_count GPUs"
            export OLLAMA_GPU_DRIVER=$(get_gpu_driver)
            export OLLAMA_GPU_COUNT=$gpu_count # Set OLLAMA_GPU_COUNT environment variable
        fi
        DEFAULT_COMPOSE_COMMAND+=" -f docker-compose.gpu.yaml"
    fi
    if [[ $enable_api == true ]]; then
        DEFAULT_COMPOSE_COMMAND+=" -f docker-compose.api.yaml"
        if [[ -n $api_port ]]; then
            export OLLAMA_WEBAPI_PORT=$api_port # Set OLLAMA_WEBAPI_PORT environment variable
        fi
    fi
    if [[ -n $data_dir ]]; then
        DEFAULT_COMPOSE_COMMAND+=" -f docker-compose.data.yaml"
        export OLLAMA_DATA_DIR=$data_dir # Set OLLAMA_DATA_DIR environment variable
    fi
    DEFAULT_COMPOSE_COMMAND+=" up -d"
    DEFAULT_COMPOSE_COMMAND+=" --remove-orphans"
    DEFAULT_COMPOSE_COMMAND+=" --force-recreate"
    if [[ $build_image == true ]]; then
        DEFAULT_COMPOSE_COMMAND+=" --build"
    fi
fi

# Recap of environment variables
echo
echo -e "${WHITE}${BOLD}Current Setup:${NC}"
echo -e "  ${GREEN}${BOLD}GPU Driver:${NC} ${OLLAMA_GPU_DRIVER:-Not Enabled}"
echo -e "  ${GREEN}${BOLD}GPU Count:${NC} ${OLLAMA_GPU_COUNT:-Not Enabled}"
echo -e "  ${GREEN}${BOLD}WebAPI Port:${NC} ${OLLAMA_WEBAPI_PORT:-Not Enabled}"
echo -e "  ${GREEN}${BOLD}Data Folder:${NC} ${data_dir:-Using ollama volume}"
echo -e "  ${GREEN}${BOLD}WebUI Port:${NC} $webui_port"
echo

if [[ $headless == true ]]; then
    echo -ne "${WHITE}${BOLD}Running in headless mode...${NC}"
    choice="y"
else
    # Ask for user acceptance
    echo -ne "${WHITE}${BOLD}Do you want to proceed with current setup? (Y/n): ${NC}"
    read -n1 -s choice
fi

echo

if [[ $choice == "" || $choice == "y" ]]; then
    # Execute the command with the current user
    eval "$DEFAULT_COMPOSE_COMMAND" &

    # Capture the background process PID
    PID=$!

    # Display the loading animation
    #show_loading $PID

    # Wait for the command to finish and capture its exit status
    # (captured before the echo below so it isn't clobbered)
    wait $PID
    status=$?

    echo
    # Check exit status
    if [ $status -eq 0 ]; then
        echo -e "${GREEN}${BOLD}Compose project started successfully.${NC}"
    else
        echo -e "${RED}${BOLD}There was an error starting the compose project.${NC}"
    fi
else
    echo "Aborted."
fi

echo
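A typical invocation, assuming the docker-compose.yaml, docker-compose.gpu.yaml, docker-compose.api.yaml, and docker-compose.data.yaml files the script composes are present in the repository root:

# One GPU, the Ollama API exposed on 11435, data bind-mounted to ./ollama-data
./run-compose.sh --enable-gpu[count=1] --enable-api[port=11435] --data[folder=./ollama-data]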
src/app.css

@@ -16,7 +16,7 @@ html {
 code {
 	/* white-space-collapse: preserve !important; */
-	white-space: pre;
+	overflow-x: auto;
 	width: auto;
 }
src/lib/components/chat/MessageInput.svelte

@@ -298,6 +298,24 @@
 					submitPrompt(prompt);
 				}
 			}}
+			on:keydown={(e) => {
+				if (prompt === '' && e.key == 'ArrowUp') {
+					e.preventDefault();
+
+					const userMessageElement = [
+						...document.getElementsByClassName('user-message')
+					]?.at(-1);
+
+					const editButton = [
+						...document.getElementsByClassName('edit-user-message-button')
+					]?.at(-1);
+
+					console.log(userMessageElement);
+
+					userMessageElement.scrollIntoView({ block: 'center' });
+					editButton?.click();
+				}
+			}}
 			rows="1"
 			on:input={(e) => {
 				e.target.style.height = '';
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -88,6 +88,7 @@
 			let code = block.querySelector('code');
 			code.style.borderTopRightRadius = 0;
 			code.style.borderTopLeftRadius = 0;
+			code.style.whiteSpace = 'pre';
 			let topBarDiv = document.createElement('div');
 			topBarDiv.style.backgroundColor = '#202123';