Commit 4002ead6 authored by Jun Siang Cheah's avatar Jun Siang Cheah
Browse files

feat: store model configs in the database

parent 1bacd5d9
...@@ -18,8 +18,9 @@ import requests ...@@ -18,8 +18,9 @@ import requests
from pydantic import BaseModel, ConfigDict from pydantic import BaseModel, ConfigDict
from typing import Optional, List from typing import Optional, List
from apps.web.models.models import Models
from utils.utils import get_verified_user, get_current_user, get_admin_user from utils.utils import get_verified_user, get_current_user, get_admin_user
from config import SRC_LOG_LEVELS, ENV, MODEL_CONFIG from config import SRC_LOG_LEVELS
from constants import MESSAGES from constants import MESSAGES
import os import os
...@@ -77,11 +78,12 @@ with open(LITELLM_CONFIG_DIR, "r") as file: ...@@ -77,11 +78,12 @@ with open(LITELLM_CONFIG_DIR, "r") as file:
app.state.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER.value app.state.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER.value
app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST.value app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST.value
app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("litellm")
]
app.state.ENABLE = ENABLE_LITELLM app.state.ENABLE = ENABLE_LITELLM
app.state.CONFIG = litellm_config app.state.CONFIG = litellm_config
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("litellm", [])
# Global variable to store the subprocess reference # Global variable to store the subprocess reference
background_process = None background_process = None
...@@ -268,9 +270,9 @@ async def get_models(user=Depends(get_current_user)): ...@@ -268,9 +270,9 @@ async def get_models(user=Depends(get_current_user)):
( (
item item
for item in app.state.MODEL_CONFIG for item in app.state.MODEL_CONFIG
if item["name"] == model["model_name"] if item.id == model["model_name"]
), ),
{}, None,
), ),
} }
for model in app.state.CONFIG["model_list"] for model in app.state.CONFIG["model_list"]
...@@ -286,7 +288,7 @@ async def get_models(user=Depends(get_current_user)): ...@@ -286,7 +288,7 @@ async def get_models(user=Depends(get_current_user)):
def add_custom_info_to_model(model: dict): def add_custom_info_to_model(model: dict):
model["custom_info"] = next( model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["id"]), {} (item for item in app.state.MODEL_CONFIG if item.id == model["id"]), None
) )
......
...@@ -29,7 +29,7 @@ import time ...@@ -29,7 +29,7 @@ import time
from urllib.parse import urlparse from urllib.parse import urlparse
from typing import Optional, List, Union from typing import Optional, List, Union
from apps.web.models.models import Models
from apps.web.models.users import Users from apps.web.models.users import Users
from constants import ERROR_MESSAGES from constants import ERROR_MESSAGES
from utils.utils import ( from utils.utils import (
...@@ -46,7 +46,6 @@ from config import ( ...@@ -46,7 +46,6 @@ from config import (
ENABLE_MODEL_FILTER, ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST, MODEL_FILTER_LIST,
UPLOAD_DIR, UPLOAD_DIR,
MODEL_CONFIG,
AppConfig, AppConfig,
) )
from utils.misc import calculate_sha256 from utils.misc import calculate_sha256
...@@ -67,7 +66,9 @@ app.state.config = AppConfig() ...@@ -67,7 +66,9 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("ollama", []) app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("ollama")
]
app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
app.state.MODELS = {} app.state.MODELS = {}
...@@ -179,7 +180,7 @@ async def get_all_models(): ...@@ -179,7 +180,7 @@ async def get_all_models():
def add_custom_info_to_model(model: dict): def add_custom_info_to_model(model: dict):
model["custom_info"] = next( model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["model"]), {} (item for item in app.state.MODEL_CONFIG if item.id == model["model"]), None
) )
......
...@@ -10,7 +10,7 @@ import logging ...@@ -10,7 +10,7 @@ import logging
from pydantic import BaseModel from pydantic import BaseModel
from apps.web.models.models import Models
from apps.web.models.users import Users from apps.web.models.users import Users
from constants import ERROR_MESSAGES from constants import ERROR_MESSAGES
from utils.utils import ( from utils.utils import (
...@@ -27,7 +27,6 @@ from config import ( ...@@ -27,7 +27,6 @@ from config import (
CACHE_DIR, CACHE_DIR,
ENABLE_MODEL_FILTER, ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST, MODEL_FILTER_LIST,
MODEL_CONFIG,
AppConfig, AppConfig,
) )
from typing import List, Optional from typing import List, Optional
...@@ -53,7 +52,9 @@ app.state.config = AppConfig() ...@@ -53,7 +52,9 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("openai", []) app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("openai")
]
app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
...@@ -262,7 +263,7 @@ async def get_all_models(): ...@@ -262,7 +263,7 @@ async def get_all_models():
def add_custom_info_to_model(model: dict): def add_custom_info_to_model(model: dict):
model["custom_info"] = next( model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["id"]), {} (item for item in app.state.MODEL_CONFIG if item.id == model["id"]), None
) )
......
"""Peewee migrations -- 002_add_local_sharing.py. """Peewee migrations -- 008_add_models.py.
Some examples (model - class or model name):: Some examples (model - class or model name)::
...@@ -37,43 +37,24 @@ with suppress(ImportError): ...@@ -37,43 +37,24 @@ with suppress(ImportError):
def migrate(migrator: Migrator, database: pw.Database, *, fake=False): def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
"""Write your migrations here.""" """Write your migrations here."""
# Adding fields created_at and updated_at to the 'user' table @migrator.create_model
migrator.add_fields( class Model(pw.Model):
"user", id = pw.TextField()
created_at=pw.BigIntegerField(null=True), # Allow null for transition source = pw.TextField()
updated_at=pw.BigIntegerField(null=True), # Allow null for transition base_model = pw.TextField(null=True)
last_active_at=pw.BigIntegerField(null=True), # Allow null for transition name = pw.TextField()
) params = pw.TextField()
# Populate the new fields from an existing 'timestamp' field class Meta:
migrator.sql( table_name = "model"
'UPDATE "user" SET created_at = timestamp, updated_at = timestamp, last_active_at = timestamp WHERE timestamp IS NOT NULL'
)
# Now that the data has been copied, remove the original 'timestamp' field indexes = (
migrator.remove_fields("user", "timestamp") # Create a unique index on the id, source columns
(("id", "source"), True),
# Update the fields to be not null now that they are populated )
migrator.change_fields(
"user",
created_at=pw.BigIntegerField(null=False),
updated_at=pw.BigIntegerField(null=False),
last_active_at=pw.BigIntegerField(null=False),
)
def rollback(migrator: Migrator, database: pw.Database, *, fake=False): def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
"""Write your rollback migrations here.""" """Write your rollback migrations here."""
# Recreate the timestamp field initially allowing null values for safe transition migrator.remove_model("model")
migrator.add_fields("user", timestamp=pw.BigIntegerField(null=True))
# Copy the earliest created_at date back into the new timestamp field
# This assumes created_at was originally a copy of timestamp
migrator.sql('UPDATE "user" SET timestamp = created_at')
# Remove the created_at and updated_at fields
migrator.remove_fields("user", "created_at", "updated_at", "last_active_at")
# Finally, alter the timestamp field to not allow nulls if that was the original setting
migrator.change_fields("user", timestamp=pw.BigIntegerField(null=False))
import json
import logging
from typing import Optional

import peewee as pw
from playhouse.shortcuts import model_to_dict
from pydantic import BaseModel

from apps.web.internal.db import DB
####################
# Models DB Schema
####################
# ModelParams is a model for the data stored in the params field of the Model table
# It isn't currently used in the backend, but it's here as a reference
class ModelParams(BaseModel):
    """Schema of the JSON blob stored in the ``params`` column of ``Model``.

    Not consumed by the backend yet; kept here as documentation of the
    expected payload shape.

    Attributes:
        description: A free-form description of the model.
        vision_capable: Whether the model accepts image (vision) inputs.
    """

    description: str
    vision_capable: bool
class Model(pw.Model):
    # The model's id as used in the API. If set to an existing model's id,
    # this config overrides that model.
    id = pw.TextField()

    # Where the model comes from, e.g. "ollama", "openai", or "litellm".
    source = pw.TextField()

    # Optional pointer to the actual model that should be used when
    # proxying requests. Currently unused; reserved to support
    # Modelfile-like behaviour in the future.
    base_model = pw.TextField(null=True)

    # Human-readable display name of the model.
    name = pw.TextField()

    # JSON-encoded blob of parameters; see ``ModelParams``.
    params = pw.TextField()

    class Meta:
        database = DB

        indexes = (
            # (id, source) pairs must be unique.
            (("id", "source"), True),
        )
class ModelModel(BaseModel):
    """Pydantic mirror of a ``Model`` row; ``params`` is the raw JSON string."""

    id: str
    source: str
    base_model: Optional[str] = None
    name: str
    params: str

    def to_form(self) -> "ModelForm":
        """Decode ``params`` from JSON and return the API-facing ``ModelForm``."""
        data = self.model_dump()
        data["params"] = json.loads(data["params"])
        return ModelForm(**data)
####################
# Forms
####################
class ModelForm(BaseModel):
    """API-facing model config; ``params`` is a decoded dict."""

    id: str
    source: str
    base_model: Optional[str] = None
    name: str
    params: dict

    def to_db_model(self) -> ModelModel:
        """Encode ``params`` as JSON and return the DB-facing ``ModelModel``."""
        data = self.model_dump()
        data["params"] = json.dumps(data["params"])
        return ModelModel(**data)
class ModelsTable:
    """Data-access layer for the model configs stored in the ``model`` table."""

    def __init__(
        self,
        db: pw.SqliteDatabase | pw.PostgresqlDatabase,
    ):
        self.db = db
        # create_tables is safe by default: a no-op if the table exists.
        self.db.create_tables([Model])

    def get_all_models(self) -> list[ModelModel]:
        """Return every stored model config."""
        return [ModelModel(**model_to_dict(model)) for model in Model.select()]

    def get_all_models_by_source(self, source: str) -> list[ModelModel]:
        """Return the stored model configs for one source (e.g. "ollama")."""
        return [
            ModelModel(**model_to_dict(model))
            for model in Model.select().where(Model.source == source)
        ]

    def update_all_models(self, models: list[ModelForm]) -> bool:
        """Synchronize the table so it contains exactly ``models``.

        Rows are keyed by (id, source): keys not yet present are inserted,
        existing keys are updated in place, and keys absent from ``models``
        are deleted. Everything runs inside one transaction.

        Args:
            models: The desired full set of model configs.

        Returns:
            True on success; False if the transaction failed (the error is
            logged and the transaction rolled back by ``atomic``).
        """
        try:
            with self.db.atomic():
                # Only the (id, source) keys of the current rows are needed
                # to classify the incoming models.
                current_model_keys = {
                    (model.id, model.source) for model in self.get_all_models()
                }
                new_model_keys = {(model.id, model.source) for model in models}

                models_to_create = [
                    model
                    for model in models
                    if (model.id, model.source) not in current_model_keys
                ]
                models_to_update = [
                    model
                    for model in models
                    if (model.id, model.source) in current_model_keys
                ]
                keys_to_delete = current_model_keys - new_model_keys

                for model in models_to_create:
                    Model.create(**model.to_db_model().model_dump())

                for model in models_to_update:
                    Model.update(**model.to_db_model().model_dump()).where(
                        (Model.id == model.id) & (Model.source == model.source)
                    ).execute()

                for model_id, model_source in keys_to_delete:
                    Model.delete().where(
                        (Model.id == model_id) & (Model.source == model_source)
                    ).execute()

            return True
        except Exception:
            # Report the failure instead of swallowing it silently; callers
            # still only see the boolean result.
            logging.getLogger(__name__).exception("Failed to update models")
            return False


# Module-level singleton used by the routers.
Models = ModelsTable(DB)
...@@ -549,10 +549,6 @@ WEBHOOK_URL = PersistentConfig( ...@@ -549,10 +549,6 @@ WEBHOOK_URL = PersistentConfig(
ENABLE_ADMIN_EXPORT = os.environ.get("ENABLE_ADMIN_EXPORT", "True").lower() == "true" ENABLE_ADMIN_EXPORT = os.environ.get("ENABLE_ADMIN_EXPORT", "True").lower() == "true"
MODEL_CONFIG = PersistentConfig(
"CONFIG_DATA", "models", {"ollama": [], "litellm": [], "openai": []}
)
#################################### ####################################
# WEBUI_SECRET_KEY # WEBUI_SECRET_KEY
#################################### ####################################
......
...@@ -35,9 +35,9 @@ from apps.web.main import app as webui_app ...@@ -35,9 +35,9 @@ from apps.web.main import app as webui_app
import asyncio import asyncio
from pydantic import BaseModel from pydantic import BaseModel
from typing import List from typing import List, Optional
from apps.web.models.models import Models, ModelModel, ModelForm
from utils.utils import get_admin_user from utils.utils import get_admin_user
from apps.rag.utils import rag_messages from apps.rag.utils import rag_messages
...@@ -59,7 +59,6 @@ from config import ( ...@@ -59,7 +59,6 @@ from config import (
SRC_LOG_LEVELS, SRC_LOG_LEVELS,
WEBHOOK_URL, WEBHOOK_URL,
ENABLE_ADMIN_EXPORT, ENABLE_ADMIN_EXPORT,
MODEL_CONFIG,
AppConfig, AppConfig,
) )
from constants import ERROR_MESSAGES from constants import ERROR_MESSAGES
...@@ -113,7 +112,7 @@ app.state.config = AppConfig() ...@@ -113,7 +112,7 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.config.MODEL_CONFIG = MODEL_CONFIG app.state.MODEL_CONFIG = [model.to_form() for model in Models.get_all_models()]
app.state.config.WEBHOOK_URL = WEBHOOK_URL app.state.config.WEBHOOK_URL = WEBHOOK_URL
...@@ -310,43 +309,40 @@ async def update_model_filter_config( ...@@ -310,43 +309,40 @@ async def update_model_filter_config(
} }
class ModelConfig(BaseModel):
id: str
name: str
description: str
vision_capable: bool
class SetModelConfigForm(BaseModel): class SetModelConfigForm(BaseModel):
ollama: List[ModelConfig] models: List[ModelForm]
litellm: List[ModelConfig]
openai: List[ModelConfig]
@app.post("/api/config/models") @app.post("/api/config/models")
async def update_model_config( async def update_model_config(
form_data: SetModelConfigForm, user=Depends(get_admin_user) form_data: SetModelConfigForm, user=Depends(get_admin_user)
): ):
data = form_data.model_dump() if not Models.update_all_models(form_data.models):
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ERROR_MESSAGES.DEFAULT("Failed to update model config"),
)
ollama_app.state.MODEL_CONFIG = data.get("ollama", []) ollama_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "ollama"
]
openai_app.state.MODEL_CONFIG = data.get("openai", []) openai_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "openai"
]
litellm_app.state.MODEL_CONFIG = data.get("litellm", []) litellm_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "litellm"
]
app.state.config.MODEL_CONFIG = { app.state.MODEL_CONFIG = [model for model in form_data.models]
"ollama": ollama_app.state.MODEL_CONFIG,
"openai": openai_app.state.MODEL_CONFIG,
"litellm": litellm_app.state.MODEL_CONFIG,
}
return {"models": app.state.config.MODEL_CONFIG} return {"models": app.state.MODEL_CONFIG}
@app.get("/api/config/models") @app.get("/api/config/models")
async def get_model_config(user=Depends(get_admin_user)): async def get_model_config(user=Depends(get_admin_user)):
return {"models": app.state.config.MODEL_CONFIG} return {"models": app.state.MODEL_CONFIG}
@app.get("/api/webhook") @app.get("/api/webhook")
......
...@@ -226,16 +226,18 @@ export const getModelConfig = async (token: string): Promise<GlobalModelConfig> ...@@ -226,16 +226,18 @@ export const getModelConfig = async (token: string): Promise<GlobalModelConfig>
export interface ModelConfig { export interface ModelConfig {
id: string; id: string;
name?: string; name: string;
source: string;
base_model?: string;
params: ModelParams;
}
export interface ModelParams {
description?: string; description?: string;
vision_capable?: boolean; vision_capable?: boolean;
} }
export interface GlobalModelConfig { export type GlobalModelConfig = ModelConfig[];
ollama: ModelConfig[];
litellm: ModelConfig[];
openai: ModelConfig[];
}
export const updateModelConfig = async (token: string, config: GlobalModelConfig) => { export const updateModelConfig = async (token: string, config: GlobalModelConfig) => {
let error = null; let error = null;
...@@ -246,7 +248,9 @@ export const updateModelConfig = async (token: string, config: GlobalModelConfig ...@@ -246,7 +248,9 @@ export const updateModelConfig = async (token: string, config: GlobalModelConfig
'Content-Type': 'application/json', 'Content-Type': 'application/json',
Authorization: `Bearer ${token}` Authorization: `Bearer ${token}`
}, },
body: JSON.stringify(config) body: JSON.stringify({
models: config
})
}) })
.then(async (res) => { .then(async (res) => {
if (!res.ok) throw await res.json(); if (!res.ok) throw await res.json();
......
...@@ -34,7 +34,7 @@ export const getLiteLLMModels = async (token: string = '') => { ...@@ -34,7 +34,7 @@ export const getLiteLLMModels = async (token: string = '') => {
name: model.name ?? model.id, name: model.name ?? model.id,
external: true, external: true,
source: 'LiteLLM', source: 'LiteLLM',
custom_info: model.custom_info ?? {} custom_info: model.custom_info
})) }))
.sort((a, b) => { .sort((a, b) => {
return a.name.localeCompare(b.name); return a.name.localeCompare(b.name);
......
...@@ -234,7 +234,7 @@ export const getOpenAIModels = async (token: string = '') => { ...@@ -234,7 +234,7 @@ export const getOpenAIModels = async (token: string = '') => {
id: model.id, id: model.id,
name: model.name ?? model.id, name: model.name ?? model.id,
external: true, external: true,
custom_info: model.custom_info ?? {} custom_info: model.custom_info
})) }))
.sort((a, b) => { .sort((a, b) => {
return a.name.localeCompare(b.name); return a.name.localeCompare(b.name);
......
<script lang="ts"> <script lang="ts">
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import { onMount, tick, getContext } from 'svelte'; import { onMount, tick, getContext } from 'svelte';
import { type Model, mobile, modelfiles, settings, showSidebar } from '$lib/stores'; import { type Model, mobile, modelfiles, settings, showSidebar, models } from '$lib/stores';
import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils'; import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils';
import { import {
...@@ -27,7 +27,8 @@ ...@@ -27,7 +27,8 @@
export let stopResponse: Function; export let stopResponse: Function;
export let autoScroll = true; export let autoScroll = true;
export let selectedModel: Model | undefined; export let selectedAtModel: Model | undefined;
export let selectedModels: [''];
let chatTextAreaElement: HTMLTextAreaElement; let chatTextAreaElement: HTMLTextAreaElement;
let filesInputElement; let filesInputElement;
...@@ -52,6 +53,8 @@ ...@@ -52,6 +53,8 @@
let speechRecognition; let speechRecognition;
let visionCapableState = 'all';
$: if (prompt) { $: if (prompt) {
if (chatTextAreaElement) { if (chatTextAreaElement) {
chatTextAreaElement.style.height = ''; chatTextAreaElement.style.height = '';
...@@ -59,6 +62,20 @@ ...@@ -59,6 +62,20 @@
} }
} }
$: {
if (selectedAtModel || selectedModels) {
visionCapableState = checkModelsAreVisionCapable();
if (visionCapableState === 'none') {
// Remove all image files
const fileCount = files.length;
files = files.filter((file) => file.type != 'image');
if (files.length < fileCount) {
toast.warning($i18n.t('All selected models do not support image input, removed images'));
}
}
}
}
let mediaRecorder; let mediaRecorder;
let audioChunks = []; let audioChunks = [];
let isRecording = false; let isRecording = false;
...@@ -326,6 +343,35 @@ ...@@ -326,6 +343,35 @@
} }
}; };
const checkModelsAreVisionCapable = () => {
let modelsToCheck = [];
if (selectedAtModel !== undefined) {
modelsToCheck = [selectedAtModel.id];
} else {
modelsToCheck = selectedModels;
}
if (modelsToCheck.length == 0 || modelsToCheck[0] == '') {
return 'all';
}
let visionCapableCount = 0;
for (const modelName of modelsToCheck) {
const model = $models.find((m) => m.id === modelName);
if (!model) {
continue;
}
if (model.custom_info?.params.vision_capable ?? true) {
visionCapableCount++;
}
}
if (visionCapableCount == modelsToCheck.length) {
return 'all';
} else if (visionCapableCount == 0) {
return 'none';
} else {
return 'some';
}
};
onMount(() => { onMount(() => {
window.setTimeout(() => chatTextAreaElement?.focus(), 0); window.setTimeout(() => chatTextAreaElement?.focus(), 0);
...@@ -358,11 +404,9 @@ ...@@ -358,11 +404,9 @@
inputFiles.forEach((file) => { inputFiles.forEach((file) => {
console.log(file, file.name.split('.').at(-1)); console.log(file, file.name.split('.').at(-1));
if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) { if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) {
if (selectedModel !== undefined) { if (visionCapableState == 'none') {
if (!(selectedModel.custom_info?.vision_capable ?? true)) { toast.error($i18n.t('Selected models do not support image inputs'));
toast.error($i18n.t('Selected model does not support image inputs.')); return;
return;
}
} }
let reader = new FileReader(); let reader = new FileReader();
reader.onload = (event) => { reader.onload = (event) => {
...@@ -500,12 +544,12 @@ ...@@ -500,12 +544,12 @@
bind:chatInputPlaceholder bind:chatInputPlaceholder
{messages} {messages}
on:select={(e) => { on:select={(e) => {
selectedModel = e.detail; selectedAtModel = e.detail;
chatTextAreaElement?.focus(); chatTextAreaElement?.focus();
}} }}
/> />
{#if selectedModel !== undefined} {#if selectedAtModel !== undefined}
<div <div
class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900" class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900"
> >
...@@ -514,7 +558,7 @@ ...@@ -514,7 +558,7 @@
crossorigin="anonymous" crossorigin="anonymous"
alt="model profile" alt="model profile"
class="size-5 max-w-[28px] object-cover rounded-full" class="size-5 max-w-[28px] object-cover rounded-full"
src={$modelfiles.find((modelfile) => modelfile.tagName === selectedModel.id) src={$modelfiles.find((modelfile) => modelfile.tagName === selectedAtModel.id)
?.imageUrl ?? ?.imageUrl ??
($i18n.language === 'dg-DG' ($i18n.language === 'dg-DG'
? `/doge.png` ? `/doge.png`
...@@ -522,7 +566,7 @@ ...@@ -522,7 +566,7 @@
/> />
<div> <div>
Talking to <span class=" font-medium" Talking to <span class=" font-medium"
>{selectedModel.custom_info?.name ?? selectedModel.name} >{selectedAtModel.custom_info?.name ?? selectedAtModel.name}
</span> </span>
</div> </div>
</div> </div>
...@@ -530,7 +574,7 @@ ...@@ -530,7 +574,7 @@
<button <button
class="flex items-center" class="flex items-center"
on:click={() => { on:click={() => {
selectedModel = undefined; selectedAtModel = undefined;
}} }}
> >
<XMark /> <XMark />
...@@ -556,13 +600,11 @@ ...@@ -556,13 +600,11 @@
const _inputFiles = Array.from(inputFiles); const _inputFiles = Array.from(inputFiles);
_inputFiles.forEach((file) => { _inputFiles.forEach((file) => {
if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) { if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) {
if (selectedModel !== undefined) { if (visionCapableState === 'none') {
if (!(selectedModel.custom_info?.vision_capable ?? true)) { toast.error($i18n.t('Selected models do not support image inputs'));
toast.error($i18n.t('Selected model does not support image inputs.')); inputFiles = null;
inputFiles = null; filesInputElement.value = '';
filesInputElement.value = ''; return;
return;
}
} }
let reader = new FileReader(); let reader = new FileReader();
reader.onload = (event) => { reader.onload = (event) => {
...@@ -897,7 +939,7 @@ ...@@ -897,7 +939,7 @@
if (e.key === 'Escape') { if (e.key === 'Escape') {
console.log('Escape'); console.log('Escape');
selectedModel = undefined; selectedAtModel = undefined;
} }
}} }}
rows="1" rows="1"
......
...@@ -12,7 +12,12 @@ ...@@ -12,7 +12,12 @@
import { user, MODEL_DOWNLOAD_POOL, models, mobile } from '$lib/stores'; import { user, MODEL_DOWNLOAD_POOL, models, mobile } from '$lib/stores';
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import { capitalizeFirstLetter, getModels, splitStream } from '$lib/utils'; import {
capitalizeFirstLetter,
getModels,
sanitizeResponseContent,
splitStream
} from '$lib/utils';
import Tooltip from '$lib/components/common/Tooltip.svelte'; import Tooltip from '$lib/components/common/Tooltip.svelte';
const i18n = getContext('i18n'); const i18n = getContext('i18n');
...@@ -23,7 +28,12 @@ ...@@ -23,7 +28,12 @@
export let searchEnabled = true; export let searchEnabled = true;
export let searchPlaceholder = $i18n.t('Search a model'); export let searchPlaceholder = $i18n.t('Search a model');
export let items = [{ value: 'mango', label: 'Mango' }]; export let items: {
label: string;
value: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
[key: string]: any;
} = [];
export let className = ' w-[30rem]'; export let className = ' w-[30rem]';
...@@ -248,12 +258,8 @@ ...@@ -248,12 +258,8 @@
<!-- {JSON.stringify(item.info)} --> <!-- {JSON.stringify(item.info)} -->
{#if item.info.external} {#if item.info.external}
<Tooltip <Tooltip content={`${item.info?.source ?? 'External'}`}>
content={`${item.info?.source ?? 'External'}${ <div class="">
item.info.custom_info?.description ? '<br>' : ''
}${item.info.custom_info?.description?.replaceAll('\n', '<br>') ?? ''}`}
>
<div class=" mr-2">
<svg <svg
xmlns="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16" viewBox="0 0 16 16"
...@@ -279,11 +285,9 @@ ...@@ -279,11 +285,9 @@
item.info?.details?.quantization_level item.info?.details?.quantization_level
? item.info?.details?.quantization_level + ' ' ? item.info?.details?.quantization_level + ' '
: '' : ''
}${item.info.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}${ }${item.info.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}
item.info.custom_info?.description ? '<br>' : ''
}${item.info.custom_info?.description?.replaceAll('\n', '<br>') ?? ''}`}
> >
<div class=" mr-2"> <div class="">
<svg <svg
xmlns="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg"
fill="none" fill="none"
...@@ -301,8 +305,31 @@ ...@@ -301,8 +305,31 @@
</div> </div>
</Tooltip> </Tooltip>
{/if} {/if}
{#if item.info?.custom_info?.params.description}
<Tooltip
content={`${sanitizeResponseContent(
item.info.custom_info?.params.description
).replaceAll('\n', '<br>')}`}
>
<div class="">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="w-4 h-4"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M9.879 7.519c1.171-1.025 3.071-1.025 4.242 0 1.172 1.025 1.172 2.687 0 3.712-.203.179-.43.326-.67.442-.745.361-1.45.999-1.45 1.827v.75M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Zm-9 5.25h.008v.008H12v-.008Z"
/>
</svg>
</div>
</Tooltip>
{/if}
</div> </div>
{#if value === item.value} {#if value === item.value}
<div class="ml-auto"> <div class="ml-auto">
<Check /> <Check />
......
...@@ -80,8 +80,8 @@ ...@@ -80,8 +80,8 @@
const model = $models.find((m) => m.id === selectedModelId); const model = $models.find((m) => m.id === selectedModelId);
if (model) { if (model) {
modelName = model.custom_info?.name ?? model.name; modelName = model.custom_info?.name ?? model.name;
modelDescription = model.custom_info?.description ?? ''; modelDescription = model.custom_info?.params.description ?? '';
modelIsVisionCapable = model.custom_info?.vision_capable ?? false; modelIsVisionCapable = model.custom_info?.params.vision_capable ?? false;
} }
}; };
...@@ -521,13 +521,18 @@ ...@@ -521,13 +521,18 @@
const modelSource = const modelSource =
'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai'; 'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai';
// Remove any existing config // Remove any existing config
modelConfig[modelSource] = modelConfig[modelSource].filter((m) => m.id !== selectedModelId); modelConfig = modelConfig.filter(
(m) => !(m.id === selectedModelId && m.source === modelSource)
);
// Add new config // Add new config
modelConfig[modelSource].push({ modelConfig.push({
id: selectedModelId, id: selectedModelId,
name: modelName, name: modelName,
description: modelDescription, source: modelSource,
vision_capable: modelIsVisionCapable params: {
description: modelDescription,
vision_capable: modelIsVisionCapable
}
}); });
await updateModelConfig(localStorage.token, modelConfig); await updateModelConfig(localStorage.token, modelConfig);
toast.success( toast.success(
...@@ -546,7 +551,9 @@ ...@@ -546,7 +551,9 @@
} }
const modelSource = const modelSource =
'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai'; 'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai';
modelConfig[modelSource] = modelConfig[modelSource].filter((m) => m.id !== selectedModelId); modelConfig = modelConfig.filter(
(m) => !(m.id === selectedModelId && m.source === modelSource)
);
await updateModelConfig(localStorage.token, modelConfig); await updateModelConfig(localStorage.token, modelConfig);
toast.success( toast.success(
$i18n.t('Model info for {{modelName}} deleted successfully', { modelName: selectedModelId }) $i18n.t('Model info for {{modelName}} deleted successfully', { modelName: selectedModelId })
...@@ -559,18 +566,28 @@ ...@@ -559,18 +566,28 @@
}; };
onMount(async () => { onMount(async () => {
OLLAMA_URLS = await getOllamaUrls(localStorage.token).catch((error) => { console.log('mounting');
toast.error(error); await Promise.all([
return []; (async () => {
}); OLLAMA_URLS = await getOllamaUrls(localStorage.token).catch((error) => {
toast.error(error);
if (OLLAMA_URLS.length > 0) { return [];
selectedOllamaUrlIdx = 0; });
}
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token); if (OLLAMA_URLS.length > 0) {
modelConfig = await getModelConfig(localStorage.token); selectedOllamaUrlIdx = 0;
ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false); }
})(),
(async () => {
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
})(),
(async () => {
modelConfig = await getModelConfig(localStorage.token);
})(),
(async () => {
ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
})()
]);
}); });
</script> </script>
......
...@@ -325,7 +325,7 @@ ...@@ -325,7 +325,7 @@
.filter((model) => model.name !== 'hr') .filter((model) => model.name !== 'hr')
.map((model) => ({ .map((model) => ({
value: model.id, value: model.id,
label: model.name, label: model.custom_info?.name ?? model.name,
info: model info: model
}))} }))}
bind:value={selectedModelId} bind:value={selectedModelId}
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
"Advanced Parameters": "التعليمات المتقدمة", "Advanced Parameters": "التعليمات المتقدمة",
"all": "الكل", "all": "الكل",
"All Documents": "جميع الملفات", "All Documents": "جميع الملفات",
"All selected models do not support image input, removed images": "",
"All Users": "جميع المستخدمين", "All Users": "جميع المستخدمين",
"Allow": "يسمح", "Allow": "يسمح",
"Allow Chat Deletion": "يستطيع حذف المحادثات", "Allow Chat Deletion": "يستطيع حذف المحادثات",
...@@ -392,7 +393,7 @@ ...@@ -392,7 +393,7 @@
"Select a model": "أختار الموديل", "Select a model": "أختار الموديل",
"Select an Ollama instance": "أختار سيرفر ", "Select an Ollama instance": "أختار سيرفر ",
"Select model": " أختار موديل", "Select model": " أختار موديل",
"Selected model does not support image inputs.": "", "Selected models do not support image inputs": "",
"Send": "", "Send": "",
"Send a Message": "يُرجى إدخال طلبك هنا", "Send a Message": "يُرجى إدخال طلبك هنا",
"Send message": "يُرجى إدخال طلبك هنا.", "Send message": "يُرجى إدخال طلبك هنا.",
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
"Advanced Parameters": "Разширени Параметри", "Advanced Parameters": "Разширени Параметри",
"all": "всички", "all": "всички",
"All Documents": "", "All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Всички Потребители", "All Users": "Всички Потребители",
"Allow": "Позволи", "Allow": "Позволи",
"Allow Chat Deletion": "Позволи Изтриване на Чат", "Allow Chat Deletion": "Позволи Изтриване на Чат",
...@@ -392,7 +393,7 @@ ...@@ -392,7 +393,7 @@
"Select a model": "Изберете модел", "Select a model": "Изберете модел",
"Select an Ollama instance": "Изберете Ollama инстанция", "Select an Ollama instance": "Изберете Ollama инстанция",
"Select model": "", "Select model": "",
"Selected model does not support image inputs.": "", "Selected models do not support image inputs": "",
"Send": "", "Send": "",
"Send a Message": "Изпращане на Съобщение", "Send a Message": "Изпращане на Съобщение",
"Send message": "Изпращане на съобщение", "Send message": "Изпращане на съобщение",
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
"Advanced Parameters": "এডভান্সড প্যারামিটার্স", "Advanced Parameters": "এডভান্সড প্যারামিটার্স",
"all": "সব", "all": "সব",
"All Documents": "", "All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "সব ইউজার", "All Users": "সব ইউজার",
"Allow": "অনুমোদন", "Allow": "অনুমোদন",
"Allow Chat Deletion": "চ্যাট ডিলিট করতে দিন", "Allow Chat Deletion": "চ্যাট ডিলিট করতে দিন",
...@@ -392,7 +393,7 @@ ...@@ -392,7 +393,7 @@
"Select a model": "একটি মডেল নির্বাচন করুন", "Select a model": "একটি মডেল নির্বাচন করুন",
"Select an Ollama instance": "একটি Ollama ইন্সট্যান্স নির্বাচন করুন", "Select an Ollama instance": "একটি Ollama ইন্সট্যান্স নির্বাচন করুন",
"Select model": "", "Select model": "",
"Selected model does not support image inputs.": "", "Selected models do not support image inputs": "",
"Send": "", "Send": "",
"Send a Message": "একটি মেসেজ পাঠান", "Send a Message": "একটি মেসেজ পাঠান",
"Send message": "মেসেজ পাঠান", "Send message": "মেসেজ পাঠান",
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
"Advanced Parameters": "Paràmetres Avançats", "Advanced Parameters": "Paràmetres Avançats",
"all": "tots", "all": "tots",
"All Documents": "", "All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Tots els Usuaris", "All Users": "Tots els Usuaris",
"Allow": "Permet", "Allow": "Permet",
"Allow Chat Deletion": "Permet la Supressió del Xat", "Allow Chat Deletion": "Permet la Supressió del Xat",
...@@ -392,7 +393,7 @@ ...@@ -392,7 +393,7 @@
"Select a model": "Selecciona un model", "Select a model": "Selecciona un model",
"Select an Ollama instance": "Selecciona una instància d'Ollama", "Select an Ollama instance": "Selecciona una instància d'Ollama",
"Select model": "", "Select model": "",
"Selected model does not support image inputs.": "", "Selected models do not support image inputs": "",
"Send": "", "Send": "",
"Send a Message": "Envia un Missatge", "Send a Message": "Envia un Missatge",
"Send message": "Envia missatge", "Send message": "Envia missatge",
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
"Advanced Parameters": "Erweiterte Parameter", "Advanced Parameters": "Erweiterte Parameter",
"all": "Alle", "all": "Alle",
"All Documents": "Alle Dokumente", "All Documents": "Alle Dokumente",
"All selected models do not support image input, removed images": "",
"All Users": "Alle Benutzer", "All Users": "Alle Benutzer",
"Allow": "Erlauben", "Allow": "Erlauben",
"Allow Chat Deletion": "Chat Löschung erlauben", "Allow Chat Deletion": "Chat Löschung erlauben",
...@@ -392,7 +393,7 @@ ...@@ -392,7 +393,7 @@
"Select a model": "Ein Modell auswählen", "Select a model": "Ein Modell auswählen",
"Select an Ollama instance": "Eine Ollama Instanz auswählen", "Select an Ollama instance": "Eine Ollama Instanz auswählen",
"Select model": "", "Select model": "",
"Selected model does not support image inputs.": "", "Selected models do not support image inputs": "",
"Send": "", "Send": "",
"Send a Message": "Eine Nachricht senden", "Send a Message": "Eine Nachricht senden",
"Send message": "Nachricht senden", "Send message": "Nachricht senden",
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
"Advanced Parameters": "Advanced Parameters", "Advanced Parameters": "Advanced Parameters",
"all": "all", "all": "all",
"All Documents": "", "All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "All Users", "All Users": "All Users",
"Allow": "Allow", "Allow": "Allow",
"Allow Chat Deletion": "Allow Delete Chats", "Allow Chat Deletion": "Allow Delete Chats",
...@@ -392,7 +393,7 @@ ...@@ -392,7 +393,7 @@
"Select a model": "Select a model much choice", "Select a model": "Select a model much choice",
"Select an Ollama instance": "Select an Ollama instance very choose", "Select an Ollama instance": "Select an Ollama instance very choose",
"Select model": "", "Select model": "",
"Selected model does not support image inputs.": "", "Selected models do not support image inputs": "",
"Send": "", "Send": "",
"Send a Message": "Send a Message much message", "Send a Message": "Send a Message much message",
"Send message": "Send message very send", "Send message": "Send message very send",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment