Commit 60433856 authored by Jun Siang Cheah

Merge remote-tracking branch 'upstream/dev' into feat/backend-web-search

parents 224a578e 98194d97
# noqa: INP001
import shutil
import subprocess
from sys import stderr
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomBuildHook(BuildHookInterface):
    def initialize(self, version, build_data):
        super().initialize(version, build_data)
        stderr.write(">>> Building Open Webui frontend\n")
        npm = shutil.which("npm")
        if npm is None:
            raise RuntimeError(
                "NodeJS `npm` is required for building Open Webui but it was not found"
            )
        stderr.write("### npm install\n")
        subprocess.run([npm, "install"], check=True)  # noqa: S603
        stderr.write("\n### npm run build\n")
        subprocess.run([npm, "run", "build"], check=True)  # noqa: S603
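For reference, hatchling discovers this hook through the [tool.hatch.build.hooks.custom] table in pyproject.toml (shown further below), which points at a hatch_build.py at the project root. A minimal sketch of exercising it, assuming the pypa `build` package and a NodeJS toolchain are installed (not part of this commit):

# Sketch only: building the wheel triggers CustomBuildHook.initialize,
# which runs `npm install` and `npm run build` before packaging.
import subprocess
import sys

subprocess.run([sys.executable, "-m", "build", "--wheel"], check=True)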
{
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0.dev1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0.dev1",
"dependencies": {
"@pyscript/core": "^0.4.32",
"@sveltejs/adapter-node": "^1.3.1",
......
{
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0.dev1",
"private": true,
"scripts": {
"dev": "npm run pyodide:fetch && vite dev --host",
......@@ -13,7 +13,7 @@
"lint:types": "npm run check",
"lint:backend": "pylint backend/",
"format": "prettier --plugin-search-dir --write \"**/*.{js,ts,svelte,css,md,html,json}\"",
"format:backend": "black . --exclude \"/venv/\"",
"format:backend": "black . --exclude \".venv/|/venv/\"",
"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write \"src/lib/i18n/**/*.{js,json}\"",
"cy:open": "cypress open",
"test:frontend": "vitest",
......
[project]
name = "open-webui"
description = "Open WebUI (Formerly Ollama WebUI)"
authors = [
{ name = "Timothy Jaeryang Baek", email = "tim@openwebui.com" }
]
license = { file = "LICENSE" }
dependencies = [
"fastapi==0.111.0",
"uvicorn[standard]==0.22.0",
"pydantic==2.7.1",
"python-multipart==0.0.9",
"Flask==3.0.3",
"Flask-Cors==4.0.1",
"python-socketio==5.11.2",
"python-jose==3.3.0",
"passlib[bcrypt]==1.7.4",
"requests==2.32.2",
"aiohttp==3.9.5",
"peewee==3.17.5",
"peewee-migrate==1.12.2",
"psycopg2-binary==2.9.9",
"PyMySQL==1.1.0",
"bcrypt==4.1.3",
"litellm[proxy]==1.37.20",
"boto3==1.34.110",
"argon2-cffi==23.1.0",
"APScheduler==3.10.4",
"google-generativeai==0.5.4",
"langchain==0.2.0",
"langchain-community==0.2.0",
"langchain-chroma==0.1.1",
"fake-useragent==1.5.1",
"chromadb==0.5.0",
"sentence-transformers==2.7.0",
"pypdf==4.2.0",
"docx2txt==0.8",
"unstructured==0.14.0",
"Markdown==3.6",
"pypandoc==1.13",
"pandas==2.2.2",
"openpyxl==3.1.2",
"pyxlsb==1.0.10",
"xlrd==2.0.1",
"validators==0.28.1",
"opencv-python-headless==4.9.0.80",
"rapidocr-onnxruntime==1.3.22",
"fpdf2==2.7.9",
"rank-bm25==0.2.2",
"faster-whisper==1.0.2",
"PyJWT[crypto]==2.8.0",
"black==24.4.2",
"langfuse==2.33.0",
"youtube-transcript-api==0.6.2",
"pytube==15.0.0",
]
readme = "README.md"
requires-python = ">= 3.11, < 3.12.0a1"
dynamic = ["version"]
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
"Topic :: Communications :: Chat",
"Topic :: Multimedia",
]
[project.scripts]
open-webui = "open_webui:app"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.rye]
managed = true
dev-dependencies = []
[tool.hatch.metadata]
allow-direct-references = true
[tool.hatch.version]
path = "package.json"
pattern = '"version":\s*"(?P<version>[^"]+)"'
[tool.hatch.build.hooks.custom] # keep this for reading hooks from `hatch_build.py`
[tool.hatch.build.targets.wheel]
sources = ["backend"]
exclude = [
".dockerignore",
".gitignore",
".webui_secret_key",
"dev.sh",
"requirements.txt",
"start.sh",
"start_windows.bat",
"webui.db",
"chroma.sqlite3",
]
force-include = { "CHANGELOG.md" = "open_webui/CHANGELOG.md", build = "open_webui/frontend" }
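The [tool.hatch.version] table above makes hatchling read the project version straight out of package.json with a regex instead of duplicating it in pyproject.toml. A minimal standalone sketch of what that lookup amounts to (hypothetical script, not part of the repository):

import re

# Same pattern as [tool.hatch.version]; hatchling searches the file at `path`
# and uses the named "version" group as the project version.
PATTERN = r'"version":\s*"(?P<version>[^"]+)"'

with open("package.json", encoding="utf-8") as fh:
    match = re.search(PATTERN, fh.read())

if match is None:
    raise ValueError("no version field found in package.json")
print(match.group("version"))  # e.g. 0.2.0.dev1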
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.3
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.110
# via open-webui
botocore==1.34.110
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.5.0
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via fastapi
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.111.0
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-cli==0.0.4
# via fastapi
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.2
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.1
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.9
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.4
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.4
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==22.0.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via fastapi
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.2.0
# via langchain-community
# via open-webui
langchain-chroma==0.1.1
# via open-webui
langchain-community==0.2.0
# via open-webui
langchain-core==0.2.1
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.2.0
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.33.0
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.37.20
# via litellm
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via fastapi
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
# via passlib
pathspec==0.12.1
# via black
peewee==3.17.5
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
# via pyjwt
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via fastapi
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.3.22
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.32.2
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.37.2
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
# via fastapi-cli
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
ujson==5.10.0
# via fastapi
unstructured==0.14.0
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via fastapi
# via litellm
# via open-webui
# via uvicorn
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.3
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.110
# via open-webui
botocore==1.34.110
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.5.0
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via fastapi
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.111.0
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-cli==0.0.4
# via fastapi
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.2
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.1
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.9
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.4
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.4
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==22.0.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via fastapi
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.2.0
# via langchain-community
# via open-webui
langchain-chroma==0.1.1
# via open-webui
langchain-community==0.2.0
# via open-webui
langchain-core==0.2.1
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.2.0
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.33.0
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.37.20
# via litellm
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via fastapi
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
# via passlib
pathspec==0.12.1
# via black
peewee==3.17.5
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
# via pyjwt
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via fastapi
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.3.22
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.32.2
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.37.2
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
# via fastapi-cli
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
ujson==5.10.0
# via fastapi
unstructured==0.14.0
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via fastapi
# via litellm
# via open-webui
# via uvicorn
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
import { OLLAMA_API_BASE_URL } from '$lib/constants';
import { promptTemplate } from '$lib/utils';

export const getOllamaConfig = async (token: string = '') => {
	let error = null;

	const res = await fetch(`${OLLAMA_API_BASE_URL}/config`, {
		method: 'GET',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			...(token && { authorization: `Bearer ${token}` })
		}
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			console.log(err);
			if ('detail' in err) {
				error = err.detail;
			} else {
				error = 'Server connection failed';
			}
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const updateOllamaConfig = async (token: string = '', enable_ollama_api: boolean) => {
	let error = null;

	const res = await fetch(`${OLLAMA_API_BASE_URL}/config/update`, {
		method: 'POST',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			...(token && { authorization: `Bearer ${token}` })
		},
		body: JSON.stringify({
			enable_ollama_api: enable_ollama_api
		})
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			console.log(err);
			if ('detail' in err) {
				error = err.detail;
			} else {
				error = 'Server connection failed';
			}
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const getOllamaUrls = async (token: string = '') => {
	let error = null;
......
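The two wrappers above call the backend's Ollama config endpoints (GET /config and POST /config/update with an enable_ollama_api flag). A rough equivalent from a Python script, assuming the backend serves these routes under a /ollama prefix (the exact prefix comes from OLLAMA_API_BASE_URL in $lib/constants) and that a valid session token is available; the URL and token below are placeholders:

import requests

# Hypothetical values; adjust to your deployment.
BASE_URL = "http://localhost:8080/ollama"  # stands in for OLLAMA_API_BASE_URL
TOKEN = "<bearer token>"  # the frontend reads this from localStorage.token

headers = {
    "Accept": "application/json",
    "Content-Type": "application/json",
    "Authorization": f"Bearer {TOKEN}",
}

# Mirror of getOllamaConfig: fetch the current Ollama config.
resp = requests.get(f"{BASE_URL}/config", headers=headers, timeout=10)
resp.raise_for_status()
print(resp.json())

# Mirror of updateOllamaConfig: toggle the Ollama API on or off.
resp = requests.post(
    f"{BASE_URL}/config/update",
    headers=headers,
    json={"enable_ollama_api": True},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())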
This diff is collapsed.
......@@ -101,7 +101,7 @@
try {
const micropip = pyodide.pyimport('micropip');
await micropip.set_index_urls('https://pypi.org/pypi/{package_name}/json');
// await micropip.set_index_urls('https://pypi.org/pypi/{package_name}/json');
let packages = [
code.includes('requests') ? 'requests' : null,
......
......@@ -490,7 +490,7 @@
<div class=" mt-2 mb-1 flex justify-end space-x-1.5 text-sm font-medium">
<button
id="close-edit-message-button"
class=" px-4 py-2 bg-gray-900 hover:bg-gray-850 text-gray-100 transition rounded-3xl"
class="px-4 py-2 bg-white hover:bg-gray-100 text-gray-800 transition rounded-3xl"
on:click={() => {
cancelEditMessage();
}}
......@@ -500,7 +500,7 @@
<button
id="save-edit-message-button"
class="px-4 py-2 bg-white hover:bg-gray-100 text-gray-800 transition rounded-3xl"
class=" px-4 py-2 bg-gray-900 hover:bg-gray-850 text-gray-100 transition rounded-3xl"
on:click={() => {
editMessageConfirmHandler();
}}
......
......@@ -201,7 +201,7 @@
<div class=" mt-2 mb-1 flex justify-end space-x-1.5 text-sm font-medium">
<button
id="close-edit-message-button"
class=" px-4 py-2 bg-gray-900 hover:bg-gray-850 text-gray-100 transition rounded-3xl"
class="px-4 py-2 bg-white hover:bg-gray-100 text-gray-800 transition rounded-3xl"
on:click={() => {
cancelEditMessage();
}}
......@@ -211,7 +211,7 @@
<button
id="save-edit-message-button"
class="px-4 py-2 bg-white hover:bg-gray-100 text-gray-800 transition rounded-3xl"
class=" px-4 py-2 bg-gray-900 hover:bg-gray-850 text-gray-100 transition rounded-3xl"
on:click={() => {
editMessageConfirmHandler();
}}
......
......@@ -3,7 +3,13 @@
import { createEventDispatcher, onMount, getContext } from 'svelte';
const dispatch = createEventDispatcher();
import { getOllamaUrls, getOllamaVersion, updateOllamaUrls } from '$lib/apis/ollama';
import {
getOllamaConfig,
getOllamaUrls,
getOllamaVersion,
updateOllamaConfig,
updateOllamaUrls
} from '$lib/apis/ollama';
import {
getOpenAIConfig,
getOpenAIKeys,
......@@ -26,6 +32,7 @@
let OPENAI_API_BASE_URLS = [''];
let ENABLE_OPENAI_API = false;
let ENABLE_OLLAMA_API = false;
const updateOpenAIHandler = async () => {
OPENAI_API_BASE_URLS = await updateOpenAIUrls(localStorage.token, OPENAI_API_BASE_URLS);
......@@ -50,10 +57,13 @@
onMount(async () => {
if ($user.role === 'admin') {
OLLAMA_BASE_URLS = await getOllamaUrls(localStorage.token);
const ollamaConfig = await getOllamaConfig(localStorage.token);
const openaiConfig = await getOpenAIConfig(localStorage.token);
const config = await getOpenAIConfig(localStorage.token);
ENABLE_OPENAI_API = config.ENABLE_OPENAI_API;
ENABLE_OPENAI_API = openaiConfig.ENABLE_OPENAI_API;
ENABLE_OLLAMA_API = ollamaConfig.ENABLE_OLLAMA_API;
OLLAMA_BASE_URLS = await getOllamaUrls(localStorage.token);
OPENAI_API_BASE_URLS = await getOpenAIUrls(localStorage.token);
OPENAI_API_KEYS = await getOpenAIKeys(localStorage.token);
......@@ -161,95 +171,108 @@
<hr class=" dark:border-gray-700" />
<div>
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Ollama Base URL')}</div>
<div class="flex w-full gap-1.5">
<div class="flex-1 flex flex-col gap-2">
{#each OLLAMA_BASE_URLS as url, idx}
<div class="flex gap-1.5">
<input
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder={$i18n.t('Enter URL (e.g. http://localhost:11434)')}
bind:value={url}
/>
<div class="pr-1.5 space-y-2">
<div class="flex justify-between items-center text-sm">
<div class=" font-medium">{$i18n.t('Ollama API')}</div>
<div class="mt-1">
<Switch
bind:state={ENABLE_OLLAMA_API}
on:change={async () => {
updateOllamaConfig(localStorage.token, ENABLE_OLLAMA_API);
}}
/>
</div>
</div>
{#if ENABLE_OLLAMA_API}
<div class="flex w-full gap-1.5">
<div class="flex-1 flex flex-col gap-2">
{#each OLLAMA_BASE_URLS as url, idx}
<div class="flex gap-1.5">
<input
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder={$i18n.t('Enter URL (e.g. http://localhost:11434)')}
bind:value={url}
/>
<div class="self-center flex items-center">
{#if idx === 0}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = [...OLLAMA_BASE_URLS, ''];
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
<div class="self-center flex items-center">
{#if idx === 0}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = [...OLLAMA_BASE_URLS, ''];
}}
type="button"
>
<path
d="M8.75 3.75a.75.75 0 0 0-1.5 0v3.5h-3.5a.75.75 0 0 0 0 1.5h3.5v3.5a.75.75 0 0 0 1.5 0v-3.5h3.5a.75.75 0 0 0 0-1.5h-3.5v-3.5Z"
/>
</svg>
</button>
{:else}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = OLLAMA_BASE_URLS.filter((url, urlIdx) => idx !== urlIdx);
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M8.75 3.75a.75.75 0 0 0-1.5 0v3.5h-3.5a.75.75 0 0 0 0 1.5h3.5v3.5a.75.75 0 0 0 1.5 0v-3.5h3.5a.75.75 0 0 0 0-1.5h-3.5v-3.5Z"
/>
</svg>
</button>
{:else}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = OLLAMA_BASE_URLS.filter((url, urlIdx) => idx !== urlIdx);
}}
type="button"
>
<path d="M3.75 7.25a.75.75 0 0 0 0 1.5h8.5a.75.75 0 0 0 0-1.5h-8.5Z" />
</svg>
</button>
{/if}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path d="M3.75 7.25a.75.75 0 0 0 0 1.5h8.5a.75.75 0 0 0 0-1.5h-8.5Z" />
</svg>
</button>
{/if}
</div>
</div>
</div>
{/each}
</div>
{/each}
</div>
<div class="">
<button
class="p-2.5 bg-gray-200 hover:bg-gray-300 dark:bg-gray-850 dark:hover:bg-gray-800 rounded-lg transition"
on:click={() => {
updateOllamaUrlsHandler();
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-4 h-4"
<div class="flex">
<button
class="self-center p-2 bg-gray-200 hover:bg-gray-300 dark:bg-gray-900 dark:hover:bg-gray-850 rounded-lg transition"
on:click={() => {
updateOllamaUrlsHandler();
}}
type="button"
>
<path
fill-rule="evenodd"
d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
clip-rule="evenodd"
/>
</svg>
</button>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
clip-rule="evenodd"
/>
</svg>
</button>
</div>
</div>
</div>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t('Trouble accessing Ollama?')}
<a
class=" text-gray-300 font-medium underline"
href="https://github.com/open-webui/open-webui#troubleshooting"
target="_blank"
>
{$i18n.t('Click here for help.')}
</a>
</div>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t('Trouble accessing Ollama?')}
<a
class=" text-gray-300 font-medium underline"
href="https://github.com/open-webui/open-webui#troubleshooting"
target="_blank"
>
{$i18n.t('Click here for help.')}
</a>
</div>
{/if}
</div>
</div>
......
......@@ -54,7 +54,7 @@
class=" flex flex-col w-full sm:flex-row sm:justify-center sm:space-x-6 h-[28rem] max-h-screen outline outline-1 rounded-xl outline-gray-100 dark:outline-gray-800 mb-4 mt-1"
>
{#if memories.length > 0}
<div class="text-left text-sm w-full mb-4 max-h-[22rem] overflow-y-scroll">
<div class="text-left text-sm w-full mb-4 overflow-y-scroll">
<div class="relative overflow-x-auto">
<table class="w-full text-sm text-left text-gray-600 dark:text-gray-400 table-auto">
<thead
......
......@@ -19,5 +19,5 @@
showImagePreview = true;
}}
>
<img src={_src} {alt} class=" max-h-96 rounded-lg" draggable="false" />
<img src={_src} {alt} class=" max-h-96 rounded-lg" draggable="false" data-cy="image" />
</button>
{
"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "",
"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' أو '-1' لا توجد انتهاء",
"(Beta)": "(تجريبي)",
"(e.g. `sh webui.sh --api`)": "( `sh webui.sh --api`مثال)",
"(latest)": "(الأخير)",
"{{modelName}} is thinking...": "{{modelName}} ...يفكر",
"{{user}}'s Chats": "{{user}}' الدردشات",
"{{user}}'s Chats": "دردشات {{user}}",
"{{webUIName}} Backend Required": "{{webUIName}} مطلوب",
"A task model is used when performing tasks such as generating titles for chats and web search queries": "",
"a user": "المستخدم",
"a user": "مستخدم",
"About": "عن",
"Account": "الحساب",
"Accurate information": "معلومات دقيقة",
"Add": "",
"Add a model": "أضافة موديل",
"Add a model tag name": "ضع تاق للأسم الموديل",
"Add": "أضف",
"Add a model": "أضف موديل",
"Add a model tag name": "ضع علامة للأسم الموديل",
"Add a short description about what this modelfile does": "أضف وصفًا قصيرًا حول ما يفعله ملف الموديل هذا",
"Add a short title for this prompt": "أضف عنوانًا قصيرًا لبداء المحادثة",
"Add a tag": "أضافة تاق",
"Add custom prompt": "أضافة مطالبة مخصصه",
"Add Docs": "إضافة المستندات",
"Add Files": "إضافة ملفات",
"Add Memory": "",
"Add Memory": "إضافة ذكرايات",
"Add message": "اضافة رسالة",
"Add Model": "اضافة موديل",
"Add Tags": "اضافة تاق",
......@@ -39,11 +39,11 @@
"Already have an account?": "هل تملك حساب ؟",
"an assistant": "مساعد",
"and": "و",
"and create a new shared link.": "",
"and create a new shared link.": "و أنشئ رابط مشترك جديد.",
"API Base URL": "API الرابط الرئيسي",
"API Key": "API مفتاح",
"API Key created.": "API تم أنشاء المفتاح",
"API keys": "API المفاتيح",
"API keys": "مفاتيح واجهة برمجة التطبيقات",
"API RPM": "API RPM",
"April": "أبريل",
"Archive": "الأرشيف",
......@@ -64,13 +64,13 @@
"before": "قبل",
"Being lazy": "كون كسول",
"Builder Mode": "بناء الموديل",
"Bypass SSL verification for Websites": "",
"Bypass SSL verification for Websites": "تجاوز التحقق من SSL للموقع",
"Cancel": "اللغاء",
"Categories": "التصنيفات",
"Change Password": "تغير الباسورد",
"Chat": "المحادثة",
"Chat Bubble UI": "",
"Chat direction": "",
"Chat Bubble UI": "UI الدردشة",
"Chat direction": "اتجاه المحادثة",
"Chat History": "تاريخ المحادثة",
"Chat History is off for this browser.": "سجل الدردشة معطل لهذا المتصفح",
"Chats": "المحادثات",
......@@ -164,7 +164,7 @@
"Edit Doc": "تعديل الملف",
"Edit User": "تعديل المستخدم",
"Email": "البريد",
"Embedding Model": "",
"Embedding Model": "نموذج التضمين",
"Embedding Model Engine": "تضمين محرك النموذج",
"Embedding model set to \"{{embedding_model}}\"": "تم تعيين نموذج التضمين على \"{{embedding_model}}\"",
"Enable Chat History": "تمكين سجل الدردشة",
......@@ -172,8 +172,8 @@
"Enabled": "تفعيل",
"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "تأكد من أن ملف CSV الخاص بك يتضمن 4 أعمدة بهذا الترتيب: Name, Email, Password, Role.",
"Enter {{role}} message here": "أدخل رسالة {{role}} هنا",
"Enter a detail about yourself for your LLMs to recall": "",
"Enter Chunk Overlap": "أدخل Chunk المتداخل",
"Enter a detail about yourself for your LLMs to recall": "ادخل معلومات عنك تريد أن يتذكرها الموديل",
"Enter Chunk Overlap": "أدخل الChunk Overlap",
"Enter Chunk Size": "أدخل Chunk الحجم",
"Enter Image Size (e.g. 512x512)": "(e.g. 512x512) أدخل حجم الصورة ",
"Enter language codes": "أدخل كود اللغة",
......@@ -186,9 +186,9 @@
"Enter Number of Steps (e.g. 50)": "(e.g. 50) أدخل عدد الخطوات",
"Enter Score": "أدخل النتيجة",
"Enter stop sequence": "أدخل تسلسل التوقف",
"Enter Top K": "Enter Top K",
"Enter Top K": "أدخل Top K",
"Enter URL (e.g. http://127.0.0.1:7860/)": "الرابط (e.g. http://127.0.0.1:7860/)",
"Enter URL (e.g. http://localhost:11434)": "",
"Enter URL (e.g. http://localhost:11434)": "URL (e.g. http://localhost:11434)",
"Enter Your Email": "أدخل البريد الاكتروني",
"Enter Your Full Name": "أدخل الاسم كامل",
"Enter Your Password": "ادخل كلمة المرور",
......@@ -217,7 +217,7 @@
"Generating search query": "",
"Generation Info": "معلومات الجيل",
"Good Response": "استجابة جيدة",
"h:mm a": "",
"h:mm a": "الساعة:الدقائق صباحا/مساء",
"has no conversations.": "ليس لديه محادثات.",
"Hello, {{name}}": " {{name}} مرحبا",
"Help": "مساعدة",
......@@ -251,29 +251,29 @@
"Light": "فاتح",
"Listening...": "جاري الاستماع",
"LLMs can make mistakes. Verify important information.": "يمكن أن تصدر بعض الأخطاء. لذلك يجب التحقق من المعلومات المهمة",
"LTR": "",
"LTR": "من جهة اليسار إلى اليمين",
"Made by OpenWebUI Community": "OpenWebUI تم إنشاؤه بواسطة مجتمع ",
"Make sure to enclose them with": "تأكد من إرفاقها",
"Manage LiteLLM Models": "LiteLLM إدارة نماذج ",
"Manage Models": "إدارة النماذج",
"Manage Ollama Models": "Ollama إدارة موديلات ",
"March": "",
"March": "مارس",
"Max Tokens": "Max Tokens",
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "يمكن تنزيل 3 نماذج كحد أقصى في وقت واحد. الرجاء معاودة المحاولة في وقت لاحق.",
"May": "",
"May": "مايو",
"Memories accessible by LLMs will be shown here.": "",
"Memory": "",
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
"Memory": "الذاكرة",
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "لن تتم مشاركة الرسائل التي ترسلها بعد إنشاء الرابط الخاص بك. سيتمكن المستخدمون الذين لديهم عنوان URL من عرض الدردشة المشتركة",
"Minimum Score": "الحد الأدنى من النقاط",
"Mirostat": "Mirostat",
"Mirostat Eta": "Mirostat Eta",
"Mirostat Tau": "Mirostat Tau",
"MMMM DD, YYYY": "MMMM DD, YYYY",
"MMMM DD, YYYY HH:mm": "MMMM DD, YYYY HH:mm",
"Model '{{modelName}}' has been successfully downloaded.": "موديل '{{modelName}}'تم تحميله بنجاح",
"Model '{{modelTag}}' is already in queue for downloading.": "موديل '{{modelTag}}' جاري تحميلة الرجاء الانتظار",
"Model {{modelId}} not found": "موديل {{modelId}} لم يوجد",
"Model {{modelName}} already exists.": "موجود {{modelName}} موديل ",
"Model '{{modelName}}' has been successfully downloaded.": "تم تحميل النموذج '{{modelName}}' بنجاح",
"Model '{{modelTag}}' is already in queue for downloading.": "النموذج '{{modelTag}}' موجود بالفعل في قائمة الانتظار للتحميل",
"Model {{modelId}} not found": "لم يتم العثور على النموذج {{modelId}}.",
"Model {{modelName}} already exists.": "موجود {{modelName}} موديل بالفعل",
"Model filesystem path detected. Model shortname is required for update, cannot continue.": "تم اكتشاف مسار نظام الملفات النموذجي. الاسم المختصر للنموذج مطلوب للتحديث، ولا يمكن الاستمرار.",
"Model Name": "أسم الموديل",
"Model not selected": "لم تختار موديل",
......@@ -306,7 +306,7 @@
"Okay, Let's Go!": "حسنا دعنا نذهب!",
"OLED Dark": "OLED داكن",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama الرابط الافتراضي",
"Ollama API": "",
"Ollama Version": "Ollama الاصدار",
"On": "تشغيل",
"Only": "فقط",
......@@ -332,7 +332,7 @@
"PDF Extract Images (OCR)": "PDF أستخرج الصور (OCR)",
"pending": "قيد الانتظار",
"Permission denied when accessing microphone: {{error}}": "{{error}} تم رفض الإذن عند الوصول إلى الميكروفون ",
"Personalization": "",
"Personalization": "التخصيص",
"Plain text (.txt)": "نص عادي (.txt)",
"Playground": "مكان التجربة",
"Positive attitude": "موقف ايجابي",
......@@ -362,7 +362,7 @@
"Repeat Last N": "N كرر آخر",
"Repeat Penalty": "كرر المخالفة",
"Request Mode": "وضع الطلب",
"Reranking Model": "",
"Reranking Model": "إعادة تقييم النموذج",
"Reranking model disabled": "تم تعطيل نموذج إعادة الترتيب",
"Reranking model set to \"{{reranking_model}}\"": "تم ضبط نموذج إعادة الترتيب على \"{{reranking_model}}\"",
"Reset Vector Storage": "إعادة تعيين تخزين المتجهات",
......@@ -370,7 +370,7 @@
"Role": "منصب",
"Rosé Pine": "Rosé Pine",
"Rosé Pine Dawn": "Rosé Pine Dawn",
"RTL": "",
"RTL": "من اليمين إلى اليسار",
"Save": "حفظ",
"Save & Create": "حفظ وإنشاء",
"Save & Update": "حفظ وتحديث",
......@@ -391,17 +391,17 @@
"Select a model": "أختار الموديل",
"Select an Ollama instance": "أختار سيرفر ",
"Select model": " أختار موديل",
"Send": "",
"Send": "تم",
"Send a Message": "يُرجى إدخال طلبك هنا",
"Send message": "يُرجى إدخال طلبك هنا.",
"September": "سبتمبر",
"Server connection verified": "تم التحقق من اتصال الخادم",
"Set as default": "الافتراضي",
"Set Default Model": "تفعيد الموديل الافتراضي",
"Set embedding model (e.g. {{model}})": "",
"Set embedding model (e.g. {{model}})": "ضبط نموذج المتجهات (على سبيل المثال: {{model}})",
"Set Image Size": "حجم الصورة",
"Set Model": "ضبط النموذج",
"Set reranking model (e.g. {{model}})": "",
"Set reranking model (e.g. {{model}})": "ضبط نموذج إعادة الترتيب (على سبيل المثال: {{model}})",
"Set Steps": "ضبط الخطوات",
"Set Task Model": "",
"Set Voice": "ضبط الصوت",
......@@ -486,8 +486,8 @@
"Version": "إصدار",
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "تحذير: إذا قمت بتحديث أو تغيير نموذج التضمين الخاص بك، فستحتاج إلى إعادة استيراد كافة المستندات.",
"Web": "Web",
"Web Loader Settings": "",
"Web Params": "",
"Web Loader Settings": "Web تحميل اعدادات",
"Web Params": "Web تحميل اعدادات",
"Web Search Disabled": "",
"Web Search Enabled": "",
"Webhook URL": "Webhook الرابط",
......@@ -497,15 +497,15 @@
"What’s New in": "ما هو الجديد",
"When history is turned off, new chats on this browser won't appear in your history on any of your devices.": "عند إيقاف تشغيل السجل، لن تظهر الدردشات الجديدة على هذا المتصفح في سجلك على أي من أجهزتك.",
"Whisper (Local)": "Whisper (Local)",
"Workspace": "",
"Workspace": "مساحة العمل",
"Write a prompt suggestion (e.g. Who are you?)": "اكتب اقتراحًا سريعًا (على سبيل المثال، من أنت؟)",
"Write a summary in 50 words that summarizes [topic or keyword].": "اكتب ملخصًا في 50 كلمة يلخص [الموضوع أو الكلمة الرئيسية]",
"Yesterday": "أمس",
"You": "",
"You": "انت",
"You have no archived conversations.": "لا تملك محادثات محفوظه",
"You have shared this chat": "تم مشاركة هذه المحادثة",
"You're a helpful assistant.": "مساعدك المفيد هنا",
"You're now logged in.": "لقد قمت الآن بتسجيل الدخول.",
"Youtube": "Youtube",
"Youtube Loader Settings": ""
"Youtube Loader Settings": "Youtube تحميل اعدادات"
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -4,14 +4,14 @@
"(e.g. `sh webui.sh --api`)": "(z.B. `sh webui.sh --api`)",
"(latest)": "(neueste)",
"{{modelName}} is thinking...": "{{modelName}} denkt nach...",
"{{user}}'s Chats": "",
"{{user}}'s Chats": "{{user}}s Chats",
"{{webUIName}} Backend Required": "{{webUIName}}-Backend erforderlich",
"A task model is used when performing tasks such as generating titles for chats and web search queries": "",
"a user": "ein Benutzer",
"About": "Über",
"Account": "Account",
"Accurate information": "Genaue Information",
"Add": "",
"Add": "Hinzufügen",
"Add a model": "Füge ein Modell hinzu",
"Add a model tag name": "Benenne deinen Modell-Tag",
"Add a short description about what this modelfile does": "Füge eine kurze Beschreibung hinzu, was dieses Modelfile kann",
......@@ -20,7 +20,7 @@
"Add custom prompt": "Eigenen Prompt hinzufügen",
"Add Docs": "Dokumente hinzufügen",
"Add Files": "Dateien hinzufügen",
"Add Memory": "",
"Add Memory": "Speicher hinzufügen",
"Add message": "Nachricht eingeben",
"Add Model": "Modell hinzufügen",
"Add Tags": "Tags hinzufügen",
......@@ -69,8 +69,8 @@
"Categories": "Kategorien",
"Change Password": "Passwort ändern",
"Chat": "Chat",
"Chat Bubble UI": "",
"Chat direction": "",
"Chat Bubble UI": "Chat Bubble UI",
"Chat direction": "Chat Richtung",
"Chat History": "Chat Verlauf",
"Chat History is off for this browser.": "Chat Verlauf ist für diesen Browser ausgeschaltet.",
"Chats": "Chats",
......@@ -92,8 +92,8 @@
"Click on the user role button to change a user's role.": "Klicke auf die Benutzerrollenschaltfläche, um die Rolle eines Benutzers zu ändern.",
"Close": "Schließe",
"Collection": "Kollektion",
"ComfyUI": "",
"ComfyUI Base URL": "",
"ComfyUI": "ComfyUI",
"ComfyUI Base URL": "ComfyUI Base URL",
"ComfyUI Base URL is required.": "ComfyUI Base URL wird benötigt.",
"Command": "Befehl",
"Confirm Password": "Passwort bestätigen",
......@@ -164,19 +164,19 @@
"Edit Doc": "Dokument bearbeiten",
"Edit User": "Benutzer bearbeiten",
"Email": "E-Mail",
"Embedding Model": "",
"Embedding Model Engine": "",
"Embedding model set to \"{{embedding_model}}\"": "",
"Embedding Model": "Embedding-Modell",
"Embedding Model Engine": "Embedding-Modell-Engine",
"Embedding model set to \"{{embedding_model}}\"": "Embedding-Modell auf \"{{embedding_model}}\" gesetzt",
"Enable Chat History": "Chat-Verlauf aktivieren",
"Enable New Sign Ups": "Neue Anmeldungen aktivieren",
"Enabled": "Aktiviert",
"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Stellen Sie sicher, dass Ihre CSV-Datei 4 Spalten in dieser Reihenfolge enthält: Name, E-Mail, Passwort, Rolle.",
"Enter {{role}} message here": "Gib die {{role}} Nachricht hier ein",
"Enter a detail about yourself for your LLMs to recall": "",
"Enter a detail about yourself for your LLMs to recall": "Geben Sie einen Detail über sich selbst ein, um für Ihre LLMs zu erinnern",
"Enter Chunk Overlap": "Gib den Chunk Overlap ein",
"Enter Chunk Size": "Gib die Chunk Size ein",
"Enter Image Size (e.g. 512x512)": "Gib die Bildgröße ein (z.B. 512x512)",
"Enter language codes": "",
"Enter language codes": "Geben Sie die Sprachcodes ein",
"Enter LiteLLM API Base URL (litellm_params.api_base)": "Gib die LiteLLM API BASE URL ein (litellm_params.api_base)",
"Enter LiteLLM API Key (litellm_params.api_key)": "Gib den LiteLLM API Key ein (litellm_params.api_key)",
"Enter LiteLLM API RPM (litellm_params.rpm)": "Gib die LiteLLM API RPM ein (litellm_params.rpm)",
......@@ -217,7 +217,7 @@
"Generating search query": "",
"Generation Info": "Generierungsinformationen",
"Good Response": "Gute Antwort",
"h:mm a": "",
"h:mm a": "h:mm a",
"has no conversations.": "hat keine Unterhaltungen.",
"Hello, {{name}}": "Hallo, {{name}}",
"Help": "Hilfe",
......@@ -251,7 +251,7 @@
"Light": "Hell",
"Listening...": "Hören...",
"LLMs can make mistakes. Verify important information.": "LLMs können Fehler machen. Überprüfe wichtige Informationen.",
"LTR": "",
"LTR": "LTR",
"Made by OpenWebUI Community": "Von der OpenWebUI-Community",
"Make sure to enclose them with": "Formatiere deine Variablen mit:",
"Manage LiteLLM Models": "LiteLLM-Modelle verwalten",
......@@ -261,9 +261,9 @@
"Max Tokens": "Maximale Tokens",
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Es können maximal 3 Modelle gleichzeitig heruntergeladen werden. Bitte versuche es später erneut.",
"May": "Mai",
"Memories accessible by LLMs will be shown here.": "",
"Memory": "",
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
"Memories accessible by LLMs will be shown here.": "Memories, die von LLMs zugänglich sind, werden hier angezeigt.",
"Memory": "Memory",
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Nachdem Sie Ihren Link erstellt haben, werden Ihre Nachrichten nicht geteilt. Benutzer mit dem Link können den geteilten Chat sehen.",
"Minimum Score": "Mindestscore",
"Mirostat": "Mirostat",
"Mirostat Eta": "Mirostat Eta",
......@@ -294,11 +294,11 @@
"No results found": "Keine Ergebnisse gefunden",
"No search query generated": "",
"No search results found": "",
"No source available": "",
"No source available": "Keine Quelle verfügbar.",
"Not factually correct": "Nicht sachlich korrekt.",
"Not sure what to add?": "Nicht sicher, was hinzugefügt werden soll?",
"Not sure what to write? Switch to": "Nicht sicher, was du schreiben sollst? Wechsel zu",
"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Hinweis: Wenn du einen Mindestscore festlegst, wird die Suche nur Dokumente zurückgeben, deren Score größer oder gleich dem Mindestscore ist.",
"Notifications": "Desktop-Benachrichtigungen",
"November": "November",
"October": "Oktober",
......@@ -306,7 +306,7 @@
"Okay, Let's Go!": "Okay, los geht's!",
"OLED Dark": "OLED Dunkel",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama Basis URL",
"Ollama API": "",
"Ollama Version": "Ollama-Version",
"On": "Ein",
"Only": "Nur",
......@@ -332,7 +332,7 @@
"PDF Extract Images (OCR)": "Text von Bildern aus PDFs extrahieren (OCR)",
"pending": "ausstehend",
"Permission denied when accessing microphone: {{error}}": "Zugriff auf das Mikrofon verweigert: {{error}}",
"Personalization": "",
"Personalization": "Personalisierung",
"Plain text (.txt)": "Nur Text (.txt)",
"Playground": "Testumgebung",
"Positive attitude": "Positive Einstellung",
......@@ -364,13 +364,13 @@
"Request Mode": "Request-Modus",
"Reranking Model": "Reranking Modell",
"Reranking model disabled": "Rranking Modell deaktiviert",
"Reranking model set to \"{{reranking_model}}\"": "",
"Reranking model set to \"{{reranking_model}}\"": "Reranking Modell auf \"{{reranking_model}}\" gesetzt",
"Reset Vector Storage": "Vektorspeicher zurücksetzen",
"Response AutoCopy to Clipboard": "Antwort automatisch in die Zwischenablage kopieren",
"Role": "Rolle",
"Rosé Pine": "Rosé Pine",
"Rosé Pine Dawn": "Rosé Pine Dawn",
"RTL": "",
"RTL": "RTL",
"Save": "Speichern",
"Save & Create": "Speichern und erstellen",
"Save & Update": "Speichern und aktualisieren",
......@@ -391,17 +391,17 @@
"Select a model": "Ein Modell auswählen",
"Select an Ollama instance": "Eine Ollama Instanz auswählen",
"Select model": "Modell auswählen",
"Send": "",
"Send": "Senden",
"Send a Message": "Eine Nachricht senden",
"Send message": "Nachricht senden",
"September": "September",
"Server connection verified": "Serververbindung überprüft",
"Set as default": "Als Standard festlegen",
"Set Default Model": "Standardmodell festlegen",
"Set embedding model (e.g. {{model}})": "",
"Set embedding model (e.g. {{model}})": "Eingabemodell festlegen (z.B. {{model}})",
"Set Image Size": "Bildgröße festlegen",
"Set Model": "Modell festlegen",
"Set reranking model (e.g. {{model}})": "",
"Set reranking model (e.g. {{model}})": "Rerankingmodell festlegen (z.B. {{model}})",
"Set Steps": "Schritte festlegen",
"Set Task Model": "",
"Set Voice": "Stimme festlegen",
......@@ -486,7 +486,7 @@
"Version": "Version",
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Warnung: Wenn du dein Einbettungsmodell aktualisierst oder änderst, musst du alle Dokumente erneut importieren.",
"Web": "Web",
"Web Loader Settings": "",
"Web Loader Settings": "Web Loader Einstellungen",
"Web Params": "Web Parameter",
"Web Search Disabled": "",
"Web Search Enabled": "",
......@@ -497,15 +497,15 @@
"What’s New in": "Was gibt's Neues in",
"When history is turned off, new chats on this browser won't appear in your history on any of your devices.": "Wenn die Historie ausgeschaltet ist, werden neue Chats nicht in deiner Historie auf deine Geräte angezeigt.",
"Whisper (Local)": "Whisper (Lokal)",
"Workspace": "",
"Workspace": "Arbeitsbereich",
"Write a prompt suggestion (e.g. Who are you?)": "Gebe einen Prompt-Vorschlag ein (z.B. Wer bist du?)",
"Write a summary in 50 words that summarizes [topic or keyword].": "Schreibe eine kurze Zusammenfassung in 50 Wörtern, die [Thema oder Schlüsselwort] zusammenfasst.",
"Yesterday": "Gestern",
"You": "",
"You": "Du",
"You have no archived conversations.": "Du hast keine archivierten Unterhaltungen.",
"You have shared this chat": "Du hast diesen Chat",
"You're a helpful assistant.": "Du bist ein hilfreicher Assistent.",
"You're now logged in.": "Du bist nun eingeloggt.",
"Youtube": "YouTube",
"Youtube Loader Settings": ""
"Youtube Loader Settings": "YouTube-Ladeeinstellungen"
}