Unverified Commit 1a9a56d6 authored by Timothy Jaeryang Baek, committed by GitHub

Merge pull request #844 from open-webui/litellm

feat: direct litellm integration
parents 437d7ff6 ec6f53e2
......@@ -5,6 +5,22 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.1.103] - UNRELEASED
### Added
- **Built-in LiteLLM Proxy**: Open WebUI now ships with LiteLLM Proxy.
- **Image Generation Enhancements**: Advanced Settings + Image Preview Feature.
### Fixed
- Issue where the RAG scan stopped loading documents as soon as it reached a file with an unsupported MIME type (or raised any other exception). (#866)
### Changed
- Ollama is no longer required to run Open WebUI.
- Our documentation can be found at https://docs.openwebui.com/.
## [0.1.102] - 2024-02-22
### Added
......
......@@ -6,6 +6,11 @@ uploads
*.db
_test
Pipfile
data/*
!/data
/data/*
!/data/litellm
/data/litellm/*
!data/litellm/config.yaml
!data/config.json
.webui_secret_key
\ No newline at end of file
......@@ -49,7 +49,7 @@ async def toggle_enabled(request: Request, user=Depends(get_admin_user)):
app.state.ENABLED = not app.state.ENABLED
return app.state.ENABLED
except Exception as e:
raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
class UrlUpdateForm(BaseModel):
......@@ -109,7 +109,8 @@ def get_models(user=Depends(get_current_user)):
models = r.json()
return models
except Exception as e:
raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
app.state.ENABLED = False
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
@app.get("/models/default")
......@@ -120,7 +121,8 @@ async def get_default_model(user=Depends(get_admin_user)):
return {"model": options["sd_model_checkpoint"]}
except Exception as e:
raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
app.state.ENABLED = False
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
class UpdateModelForm(BaseModel):
......@@ -190,4 +192,4 @@ def generate_image(
return r.json()
except Exception as e:
print(e)
raise HTTPException(status_code=r.status_code, detail=ERROR_MESSAGES.DEFAULT(e))
raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
......@@ -83,8 +83,6 @@ for version in soup.find_all("h2"):
# Find the next sibling that is a h3 tag (section title)
current = version.find_next_sibling()
print(current)
while current and current.name != "h2":
if current.name == "h3":
section_title = current.get_text().lower() # e.g., "added", "fixed"
......
general_settings: {}
litellm_settings: {}
model_list: []
router_settings: {}
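The file above is the stock, empty LiteLLM proxy config shipped at data/litellm/config.yaml (allowlisted in .gitignore earlier in this diff). For orientation only — this is not part of the commit — a populated model_list entry would look roughly like the following, mirroring the payload that addLiteLLMModel posts to /model/new later in this diff; the model name and key are placeholders:

general_settings: {}
litellm_settings: {}
model_list:
  - model_name: my-gpt-4        # display name; maps to model_name in /model/new
    litellm_params:
      model: openai/gpt-4       # upstream provider/model for LiteLLM to route to
      api_key: YOUR_API_KEY     # placeholder; omit fields you don't need
      rpm: 60                   # optional requests-per-minute limit
router_settings: {}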
......@@ -2,25 +2,31 @@ from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
from fastapi import FastAPI, Request
from fastapi import FastAPI, Request, Depends
from fastapi.staticfiles import StaticFiles
from fastapi import HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from litellm.proxy.proxy_server import ProxyConfig, initialize
from litellm.proxy.proxy_server import app as litellm_app
from apps.ollama.main import app as ollama_app
from apps.openai.main import app as openai_app
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.web.main import app as webui_app
from config import WEBUI_NAME, ENV, VERSION, CHANGELOG, FRONTEND_BUILD_DIR
from utils.utils import get_http_authorization_cred, get_current_user
class SPAStaticFiles(StaticFiles):
......@@ -34,6 +40,21 @@ class SPAStaticFiles(StaticFiles):
raise ex
proxy_config = ProxyConfig()
async def config():
router, model_list, general_settings = await proxy_config.load_config(
router=None, config_file_path="./data/litellm/config.yaml"
)
await initialize(config="./data/litellm/config.yaml", telemetry=False)
async def startup():
await config()
app = FastAPI(docs_url="/docs" if ENV == "dev" else None, redoc_url=None)
origins = ["*"]
......@@ -47,6 +68,11 @@ app.add_middleware(
)
@app.on_event("startup")
async def on_startup():
await startup()
@app.middleware("http")
async def check_url(request: Request, call_next):
start_time = int(time.time())
......@@ -57,7 +83,23 @@ async def check_url(request: Request, call_next):
return response
@litellm_app.middleware("http")
async def auth_middleware(request: Request, call_next):
auth_header = request.headers.get("Authorization", "")
if ENV != "dev":
try:
user = get_current_user(get_http_authorization_cred(auth_header))
print(user)
except Exception as e:
return JSONResponse(status_code=400, content={"detail": str(e)})
response = await call_next(request)
return response
app.mount("/api/v1", webui_app)
app.mount("/litellm/api", litellm_app)
app.mount("/ollama/api", ollama_app)
app.mount("/openai/api", openai_app)
......
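To summarize the wiring above: LiteLLM's own FastAPI app is mounted at /litellm/api, its startup config is loaded from ./data/litellm/config.yaml, and a middleware on the mounted app rejects requests without a valid WebUI JWT whenever ENV != "dev". A hedged sketch of what a browser-side call through that mount looks like — this mirrors the litellm.ts wrapper added below:

import { LITELLM_API_BASE_URL } from '$lib/constants'; // `${WEBUI_BASE_URL}/litellm/api`, added below

// Assumes the WebUI JWT is stored in localStorage.token, as elsewhere in this codebase.
const res = await fetch(`${LITELLM_API_BASE_URL}/v1/models`, {
	method: 'GET',
	headers: {
		Accept: 'application/json',
		// Validated by auth_middleware above whenever ENV !== 'dev'
		authorization: `Bearer ${localStorage.token}`
	}
});
const models = await res.json();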
......@@ -16,6 +16,9 @@ aiohttp
peewee
bcrypt
litellm
apscheduler
langchain
langchain-community
chromadb
......
......@@ -58,6 +58,17 @@ def extract_token_from_auth_header(auth_header: str):
return auth_header[len("Bearer ") :]
def get_http_authorization_cred(auth_header: str):
try:
scheme, credentials = auth_header.split(" ")
return {
"scheme": scheme,
"credentials": credentials,
}
except:
raise ValueError(ERROR_MESSAGES.INVALID_TOKEN)
def get_current_user(
auth_token: HTTPAuthorizationCredentials = Depends(bearer_security),
):
......
{
"name": "open-webui",
"version": "0.1.102",
"version": "0.1.103",
"private": true,
"scripts": {
"dev": "vite dev --host",
......
import { LITELLM_API_BASE_URL } from '$lib/constants';
export const getLiteLLMModels = async (token: string = '') => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/v1/models`, {
method: 'GET',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
const models = Array.isArray(res) ? res : res?.data ?? null;
return models
? models
.map((model) => ({
id: model.id,
name: model.name ?? model.id,
external: true,
source: 'litellm'
}))
.sort((a, b) => {
return a.name.localeCompare(b.name);
})
: models;
};
export const getLiteLLMModelInfo = async (token: string = '') => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/model/info`, {
method: 'GET',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
const models = Array.isArray(res) ? res : res?.data ?? null;
return models;
};
type AddLiteLLMModelForm = {
name: string;
model: string;
api_base: string;
api_key: string;
rpm: string;
};
export const addLiteLLMModel = async (token: string = '', payload: AddLiteLLMModelForm) => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/model/new`, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
},
body: JSON.stringify({
model_name: payload.name,
litellm_params: {
model: payload.model,
...(payload.api_base === '' ? {} : { api_base: payload.api_base }),
...(payload.api_key === '' ? {} : { api_key: payload.api_key }),
...(isNaN(parseInt(payload.rpm)) ? {} : { rpm: parseInt(payload.rpm) })
}
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
return res;
};
export const deleteLiteLLMModel = async (token: string = '', id: string) => {
let error = null;
const res = await fetch(`${LITELLM_API_BASE_URL}/model/delete`, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
...(token && { authorization: `Bearer ${token}` })
},
body: JSON.stringify({
id: id
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = `LiteLLM: ${err?.error?.message ?? 'Network Problem'}`;
return [];
});
if (error) {
throw error;
}
return res;
};
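Taken together, the wrappers above cover the model lifecycle against the embedded proxy. A usage sketch — the model name and key are illustrative, not part of this commit:

// Register an OpenAI-compatible model with the embedded proxy...
await addLiteLLMModel(localStorage.token, {
	name: 'my-gpt-4', // becomes model_name
	model: 'openai/gpt-4', // becomes litellm_params.model
	api_base: '', // empty strings are omitted from litellm_params
	api_key: 'YOUR_API_KEY',
	rpm: '60' // non-numeric values are dropped by the isNaN guard
});

// ...then it shows up, normalized and sorted, in the model list:
const models = await getLiteLLMModels(localStorage.token);
// e.g. [{ id: 'my-gpt-4', name: 'my-gpt-4', external: true, source: 'litellm' }]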
......@@ -128,9 +128,11 @@ export const getOllamaModels = async (token: string = '') => {
throw error;
}
return (res?.models ?? []).sort((a, b) => {
return a.name.localeCompare(b.name);
});
return (res?.models ?? [])
.map((model) => ({ id: model.model, name: model.name ?? model.model, ...model }))
.sort((a, b) => {
return a.name.localeCompare(b.name);
});
};
// TODO: migrate to backend
......
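This normalization gives every Ollama entry an explicit id alongside its name — OpenAI and LiteLLM entries get the same treatment below — presumably so downstream components can key on a stable identifier rather than the display name. An illustrative shape, with made-up values:

const entry = {
	id: 'llama2:latest', // taken from model.model
	name: 'llama2:latest', // falls back to model.model when name is absent
	size: 3825819519 // Ollama's own fields are preserved via ...model
};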
......@@ -163,7 +163,7 @@ export const getOpenAIModels = async (token: string = '') => {
return models
? models
.map((model) => ({ name: model.id, external: true }))
.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
.sort((a, b) => {
return a.name.localeCompare(b.name);
})
......@@ -200,17 +200,21 @@ export const getOpenAIModelsDirect = async (
const models = Array.isArray(res) ? res : res?.data ?? null;
return models
.map((model) => ({ name: model.id, external: true }))
.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
.filter((model) => (base_url.includes('openai') ? model.name.includes('gpt') : true))
.sort((a, b) => {
return a.name.localeCompare(b.name);
});
};
export const generateOpenAIChatCompletion = async (token: string = '', body: object) => {
export const generateOpenAIChatCompletion = async (
token: string = '',
body: object,
url: string = OPENAI_API_BASE_URL
) => {
let error = null;
const res = await fetch(`${OPENAI_API_BASE_URL}/chat/completions`, {
const res = await fetch(`${url}/chat/completions`, {
method: 'POST',
headers: {
Authorization: `Bearer ${token}`,
......
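The new url parameter is what lets chat completions be routed to the embedded LiteLLM proxy, which speaks the same OpenAI wire format. A hedged sketch (the model name is illustrative):

import { generateOpenAIChatCompletion } from '$lib/apis/openai';
import { LITELLM_API_BASE_URL } from '$lib/constants';

const res = await generateOpenAIChatCompletion(
	localStorage.token,
	{ model: 'my-gpt-4', messages: [{ role: 'user', content: 'Hello!' }] },
	LITELLM_API_BASE_URL // omit to keep the original OPENAI_API_BASE_URL behavior
);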
......@@ -25,7 +25,7 @@
$: if (selectedModels.length > 0 && $models.length > 0) {
selectedModels = selectedModels.map((model) =>
$models.map((m) => m.name).includes(model) ? model : ''
$models.map((m) => m.id).includes(model) ? model : ''
);
}
</script>
......@@ -45,7 +45,7 @@
{#if model.name === 'hr'}
<hr />
{:else}
<option value={model.name} class="text-gray-700 text-lg"
<option value={model.id} class="text-gray-700 text-lg"
>{model.name +
`${model.size ? ` (${(model.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}</option
>
......
......@@ -38,16 +38,18 @@
</div>
</div>
<hr class=" dark:border-gray-700" />
{#if ollamaVersion}
<hr class=" dark:border-gray-700" />
<div>
<div class=" mb-2.5 text-sm font-medium">Ollama Version</div>
<div class="flex w-full">
<div class="flex-1 text-xs text-gray-700 dark:text-gray-200">
{ollamaVersion ?? 'N/A'}
<div>
<div class=" mb-2.5 text-sm font-medium">Ollama Version</div>
<div class="flex w-full">
<div class="flex-1 text-xs text-gray-700 dark:text-gray-200">
{ollamaVersion ?? 'N/A'}
</div>
</div>
</div>
</div>
{/if}
<hr class=" dark:border-gray-700" />
......
......@@ -3,7 +3,7 @@
import { createEventDispatcher, onMount } from 'svelte';
const dispatch = createEventDispatcher();
import { getOllamaAPIUrl, updateOllamaAPIUrl } from '$lib/apis/ollama';
import { getOllamaAPIUrl, getOllamaVersion, updateOllamaAPIUrl } from '$lib/apis/ollama';
import { getOpenAIKey, getOpenAIUrl, updateOpenAIKey, updateOpenAIUrl } from '$lib/apis/openai';
import toast from 'svelte-french-toast';
......@@ -15,6 +15,9 @@
let OPENAI_API_KEY = '';
let OPENAI_API_BASE_URL = '';
let showOpenAI = false;
let showLiteLLM = false;
const updateOpenAIHandler = async () => {
OPENAI_API_BASE_URL = await updateOpenAIUrl(localStorage.token, OPENAI_API_BASE_URL);
OPENAI_API_KEY = await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
......@@ -24,11 +27,14 @@
const updateOllamaAPIUrlHandler = async () => {
API_BASE_URL = await updateOllamaAPIUrl(localStorage.token, API_BASE_URL);
const _models = await getModels('ollama');
if (_models.length > 0) {
const ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => {
return null;
});
if (ollamaVersion) {
toast.success('Server connection verified');
await models.set(_models);
await models.set(await getModels());
}
};
......@@ -42,7 +48,7 @@
</script>
<form
class="flex flex-col h-full space-y-3 text-sm"
class="flex flex-col h-full justify-between text-sm"
on:submit|preventDefault={() => {
updateOpenAIHandler();
dispatch('save');
......@@ -53,81 +59,100 @@
// });
}}
>
<div>
<div class=" mb-2.5 text-sm font-medium">Ollama API URL</div>
<div class="flex w-full">
<div class="flex-1 mr-2">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter URL (e.g. http://localhost:11434/api)"
bind:value={API_BASE_URL}
/>
</div>
<button
class="px-3 bg-gray-200 hover:bg-gray-300 dark:bg-gray-600 dark:hover:bg-gray-700 rounded transition"
on:click={() => {
updateOllamaAPIUrlHandler();
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
clip-rule="evenodd"
/>
</svg>
</button>
</div>
<div class=" pr-1.5 overflow-y-scroll max-h-[21rem] space-y-3">
<div class=" space-y-3">
<div class="mt-2 space-y-2 pr-1.5">
<div class="flex justify-between items-center text-sm">
<div class=" font-medium">OpenAI API</div>
<button
class=" text-xs font-medium text-gray-500"
type="button"
on:click={() => {
showOpenAI = !showOpenAI;
}}>{showOpenAI ? 'Hide' : 'Show'}</button
>
</div>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
Trouble accessing Ollama?
<a
class=" text-gray-300 font-medium"
href="https://github.com/open-webui/open-webui#troubleshooting"
target="_blank"
>
Click here for help.
</a>
{#if showOpenAI}
<div>
<div class=" mb-2.5 text-sm font-medium">API Key</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter OpenAI API Key"
bind:value={OPENAI_API_KEY}
autocomplete="off"
/>
</div>
</div>
</div>
<div>
<div class=" mb-2.5 text-sm font-medium">API Base URL</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter OpenAI API Base URL"
bind:value={OPENAI_API_BASE_URL}
autocomplete="off"
/>
</div>
</div>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
WebUI will make requests to <span class=" text-gray-200"
>'{OPENAI_API_BASE_URL}/chat'</span
>
</div>
</div>
{/if}
</div>
</div>
</div>
<hr class=" dark:border-gray-700" />
<hr class=" dark:border-gray-700" />
<div class=" space-y-3">
<div>
<div class=" mb-2.5 text-sm font-medium">OpenAI API Key</div>
<div class=" mb-2.5 text-sm font-medium">Ollama API URL</div>
<div class="flex w-full">
<div class="flex-1">
<div class="flex-1 mr-2">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter OpenAI API Key"
bind:value={OPENAI_API_KEY}
autocomplete="off"
placeholder="Enter URL (e.g. http://localhost:11434/api)"
bind:value={API_BASE_URL}
/>
</div>
<button
class="px-3 bg-gray-200 hover:bg-gray-300 dark:bg-gray-600 dark:hover:bg-gray-700 rounded transition"
on:click={() => {
updateOllamaAPIUrlHandler();
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
clip-rule="evenodd"
/>
</svg>
</button>
</div>
</div>
<div>
<div class=" mb-2.5 text-sm font-medium">OpenAI API Base URL</div>
<div class="flex w-full">
<div class="flex-1">
<input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none"
placeholder="Enter OpenAI API Base URL"
bind:value={OPENAI_API_BASE_URL}
autocomplete="off"
/>
</div>
</div>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
WebUI will make requests to <span class=" text-gray-200">'{OPENAI_API_BASE_URL}/chat'</span>
Trouble accessing Ollama?
<a
class=" text-gray-300 font-medium"
href="https://github.com/open-webui/open-webui#troubleshooting"
target="_blank"
>
Click here for help.
</a>
</div>
</div>
</div>
......
......@@ -32,9 +32,11 @@
const getModels = async () => {
models = await getDiffusionModels(localStorage.token).catch((error) => {
toast.error(error);
return null;
return [];
});
selectedModel = await getDefaultDiffusionModel(localStorage.token).catch((error) => {
return '';
});
selectedModel = await getDefaultDiffusionModel(localStorage.token);
};
const updateAUTOMATIC1111UrlHandler = async () => {
......
......@@ -4,6 +4,7 @@
import { getOllamaModels } from '$lib/apis/ollama';
import { getOpenAIModels } from '$lib/apis/openai';
import { getLiteLLMModels } from '$lib/apis/litellm';
import Modal from '../common/Modal.svelte';
import Account from './Settings/Account.svelte';
......@@ -27,23 +28,29 @@
let selectedTab = 'general';
const getModels = async (type = 'all') => {
const models = [];
models.push(
...(await getOllamaModels(localStorage.token).catch((error) => {
toast.error(error);
return [];
}))
);
if (type === 'all') {
const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
const getModels = async () => {
let models = await Promise.all([
await getOllamaModels(localStorage.token).catch((error) => {
console.log(error);
return null;
}),
await getOpenAIModels(localStorage.token).catch((error) => {
console.log(error);
return null;
});
models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
}
}),
await getLiteLLMModels(localStorage.token).catch((error) => {
console.log(error);
return null;
})
]);
models = models
.filter((models) => models)
.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
return models;
};
</script>
......
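The filter/reduce pair above implements what the commented-out lines spell out: drop providers that returned null, then splice an { name: 'hr' } divider between consecutive non-empty groups. A tiny self-contained sketch with made-up data:

const groups = [
	[{ id: 'llama2', name: 'llama2' }], // e.g. Ollama
	null, // a provider that errored resolves to null
	[{ id: 'gpt-4', name: 'gpt-4', external: true }] // e.g. OpenAI or LiteLLM
];

const merged = groups
	.filter((g) => g) // null groups disappear before separators are counted
	.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
// -> [{ id: 'llama2', ... }, { name: 'hr' }, { id: 'gpt-4', ... }]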
......@@ -5,6 +5,8 @@ export const APP_NAME = 'Open WebUI';
export const WEBUI_BASE_URL = dev ? `http://${location.hostname}:8080` : ``;
export const WEBUI_API_BASE_URL = `${WEBUI_BASE_URL}/api/v1`;
export const LITELLM_API_BASE_URL = `${WEBUI_BASE_URL}/litellm/api`;
export const OLLAMA_API_BASE_URL = `${WEBUI_BASE_URL}/ollama/api`;
export const OPENAI_API_BASE_URL = `${WEBUI_BASE_URL}/openai/api`;
export const AUDIO_API_BASE_URL = `${WEBUI_BASE_URL}/audio/api/v1`;
......
......@@ -11,6 +11,7 @@
import { getModelfiles } from '$lib/apis/modelfiles';
import { getPrompts } from '$lib/apis/prompts';
import { getOpenAIModels } from '$lib/apis/openai';
import { getLiteLLMModels } from '$lib/apis/litellm';
import { getDocs } from '$lib/apis/documents';
import { getAllChatTags } from '$lib/apis/chats';
......@@ -43,24 +44,28 @@
let showShortcuts = false;
const getModels = async () => {
let models = [];
models.push(
...(await getOllamaModels(localStorage.token).catch((error) => {
toast.error(error);
return [];
}))
);
// $settings.OPENAI_API_BASE_URL ?? 'https://api.openai.com/v1',
// $settings.OPENAI_API_KEY
const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
console.log(error);
return null;
});
models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
let models = await Promise.all([
await getOllamaModels(localStorage.token).catch((error) => {
console.log(error);
return null;
}),
await getOpenAIModels(localStorage.token).catch((error) => {
console.log(error);
return null;
}),
await getLiteLLMModels(localStorage.token).catch((error) => {
console.log(error);
return null;
})
]);
models = models
.filter((models) => models)
.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
return models;
};
......@@ -117,8 +122,6 @@
await models.set(await getModels());
});
await setOllamaVersion();
document.addEventListener('keydown', function (event) {
const isCtrlPressed = event.ctrlKey || event.metaKey; // metaKey is for Cmd key on Mac
// Check if the Shift key is pressed
......@@ -250,60 +253,6 @@
</div>
</div>
</div>
{:else if checkVersion(REQUIRED_OLLAMA_VERSION, ollamaVersion ?? '0')}
<div class="fixed w-full h-full flex z-50">
<div
class="absolute w-full h-full backdrop-blur-md bg-white/20 dark:bg-gray-900/50 flex justify-center"
>
<div class="m-auto pb-44 flex flex-col justify-center">
<div class="max-w-md">
<div class="text-center dark:text-white text-2xl font-medium z-50">
Connection Issue or Update Needed
</div>
<div class=" mt-4 text-center text-sm dark:text-gray-200 w-full">
Oops! It seems like your Ollama needs a little attention. <br
class=" hidden sm:flex"
/>We've detected either a connection hiccup or observed that you're using an older
version. Ensure you're on the latest Ollama version
<br class=" hidden sm:flex" />(version
<span class=" dark:text-white font-medium">{REQUIRED_OLLAMA_VERSION} or higher</span
>) or check your connection.
<div class="mt-1 text-sm">
Trouble accessing Ollama?
<a
class=" text-black dark:text-white font-semibold underline"
href="https://github.com/open-webui/open-webui#troubleshooting"
target="_blank"
>
Click here for help.
</a>
</div>
</div>
<div class=" mt-6 mx-auto relative group w-fit">
<button
class="relative z-20 flex px-5 py-2 rounded-full bg-white border border-gray-100 dark:border-none hover:bg-gray-100 transition font-medium text-sm"
on:click={async () => {
location.href = '/';
// await setOllamaVersion();
}}
>
Check Again
</button>
<button
class="text-xs text-center w-full mt-2 text-gray-400 underline"
on:click={async () => {
await setOllamaVersion(REQUIRED_OLLAMA_VERSION);
}}>Close</button
>
</div>
</div>
</div>
</div>
</div>
{:else if localDBChats.length > 0}
<div class="fixed w-full h-full flex z-50">
<div
......