Commit 4002ead6 authored by Jun Siang Cheah's avatar Jun Siang Cheah
Browse files

feat: store model configs in the database

parent 1bacd5d9
......@@ -18,8 +18,9 @@ import requests
from pydantic import BaseModel, ConfigDict
from typing import Optional, List
from apps.web.models.models import Models
from utils.utils import get_verified_user, get_current_user, get_admin_user
from config import SRC_LOG_LEVELS, ENV, MODEL_CONFIG
from config import SRC_LOG_LEVELS
from constants import MESSAGES
import os
......@@ -77,11 +78,12 @@ with open(LITELLM_CONFIG_DIR, "r") as file:
app.state.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER.value
app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST.value
app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("litellm")
]
app.state.ENABLE = ENABLE_LITELLM
app.state.CONFIG = litellm_config
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("litellm", [])
# Global variable to store the subprocess reference
background_process = None
......@@ -268,9 +270,9 @@ async def get_models(user=Depends(get_current_user)):
(
item
for item in app.state.MODEL_CONFIG
if item["name"] == model["model_name"]
if item.id == model["model_name"]
),
{},
None,
),
}
for model in app.state.CONFIG["model_list"]
......@@ -286,7 +288,7 @@ async def get_models(user=Depends(get_current_user)):
def add_custom_info_to_model(model: dict):
model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["id"]), {}
(item for item in app.state.MODEL_CONFIG if item.id == model["id"]), None
)
......
......@@ -29,7 +29,7 @@ import time
from urllib.parse import urlparse
from typing import Optional, List, Union
from apps.web.models.models import Models
from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
......@@ -46,7 +46,6 @@ from config import (
ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST,
UPLOAD_DIR,
MODEL_CONFIG,
AppConfig,
)
from utils.misc import calculate_sha256
......@@ -67,7 +66,9 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("ollama", [])
app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("ollama")
]
app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
app.state.MODELS = {}
......@@ -179,7 +180,7 @@ async def get_all_models():
def add_custom_info_to_model(model: dict):
model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["model"]), {}
(item for item in app.state.MODEL_CONFIG if item.id == model["model"]), None
)
......
......@@ -10,7 +10,7 @@ import logging
from pydantic import BaseModel
from apps.web.models.models import Models
from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
......@@ -27,7 +27,6 @@ from config import (
CACHE_DIR,
ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST,
MODEL_CONFIG,
AppConfig,
)
from typing import List, Optional
......@@ -53,7 +52,9 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("openai", [])
app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("openai")
]
app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
......@@ -262,7 +263,7 @@ async def get_all_models():
def add_custom_info_to_model(model: dict):
model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["id"]), {}
(item for item in app.state.MODEL_CONFIG if item.id == model["id"]), None
)
......
"""Peewee migrations -- 002_add_local_sharing.py.
"""Peewee migrations -- 008_add_models.py.
Some examples (model - class or model name)::
......@@ -37,43 +37,24 @@ with suppress(ImportError):
def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
"""Write your migrations here."""
# Adding fields created_at and updated_at to the 'user' table
migrator.add_fields(
"user",
created_at=pw.BigIntegerField(null=True), # Allow null for transition
updated_at=pw.BigIntegerField(null=True), # Allow null for transition
last_active_at=pw.BigIntegerField(null=True), # Allow null for transition
)
@migrator.create_model
class Model(pw.Model):
id = pw.TextField()
source = pw.TextField()
base_model = pw.TextField(null=True)
name = pw.TextField()
params = pw.TextField()
# Populate the new fields from an existing 'timestamp' field
migrator.sql(
'UPDATE "user" SET created_at = timestamp, updated_at = timestamp, last_active_at = timestamp WHERE timestamp IS NOT NULL'
)
class Meta:
table_name = "model"
# Now that the data has been copied, remove the original 'timestamp' field
migrator.remove_fields("user", "timestamp")
# Update the fields to be not null now that they are populated
migrator.change_fields(
"user",
created_at=pw.BigIntegerField(null=False),
updated_at=pw.BigIntegerField(null=False),
last_active_at=pw.BigIntegerField(null=False),
)
indexes = (
# Create a unique index on the id, source columns
(("id", "source"), True),
)
def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
"""Write your rollback migrations here."""
# Recreate the timestamp field initially allowing null values for safe transition
migrator.add_fields("user", timestamp=pw.BigIntegerField(null=True))
# Copy the earliest created_at date back into the new timestamp field
# This assumes created_at was originally a copy of timestamp
migrator.sql('UPDATE "user" SET timestamp = created_at')
# Remove the created_at and updated_at fields
migrator.remove_fields("user", "created_at", "updated_at", "last_active_at")
# Finally, alter the timestamp field to not allow nulls if that was the original setting
migrator.change_fields("user", timestamp=pw.BigIntegerField(null=False))
migrator.remove_model("model")
import json
import logging
from typing import Optional

import peewee as pw
from playhouse.shortcuts import model_to_dict
from pydantic import BaseModel

from apps.web.internal.db import DB
####################
# Models DB Schema
####################
# ModelParams is a model for the data stored in the params field of the Model table
# It isn't currently used in the backend, but it's here as a reference
class ModelParams(BaseModel):
    """Schema of the JSON blob stored in ``Model.params``.

    Kept as a reference only — the backend does not currently validate
    against it.

    Attributes:
        description (str): A description of the model.
        vision_capable (bool): Whether the model accepts image (vision) inputs.
    """

    description: str
    vision_capable: bool
class Model(pw.Model):
    """Peewee row for a per-model configuration entry."""

    # The model's id as used in the API. If set to an existing model, it
    # will override that model.
    id = pw.TextField()

    # The source of the model, e.g., ollama, openai, or litellm.
    source = pw.TextField()

    # An optional pointer to the actual model that should be used when
    # proxying requests. Currently unused — will be used to support
    # Modelfile-like behaviour in the future.
    base_model = pw.TextField(null=True)

    # The human-readable display name of the model.
    name = pw.TextField()

    # Holds a JSON encoded blob of parameters, see `ModelParams`.
    params = pw.TextField()

    class Meta:
        database = DB

        indexes = (
            # Create a unique index on the id, source columns
            (("id", "source"), True),
        )
class ModelModel(BaseModel):
    """Database-facing view of a model row: ``params`` is a JSON string."""

    id: str
    source: str
    base_model: Optional[str] = None
    name: str
    params: str

    def to_form(self) -> "ModelForm":
        """Return a ``ModelForm`` with the JSON ``params`` blob decoded."""
        data = self.model_dump()
        data["params"] = json.loads(data["params"])
        return ModelForm(**data)
####################
# Forms
####################
class ModelForm(BaseModel):
    """API-facing view of a model: ``params`` is a decoded dict."""

    id: str
    source: str
    base_model: Optional[str] = None
    name: str
    params: dict

    def to_db_model(self) -> ModelModel:
        """Return a ``ModelModel`` with ``params`` JSON-encoded for storage."""
        data = self.model_dump()
        data["params"] = json.dumps(data["params"])
        return ModelModel(**data)
class ModelsTable:
    """Data-access layer for the ``model`` table."""

    def __init__(
        self,
        db: pw.SqliteDatabase | pw.PostgresqlDatabase,
    ):
        self.db = db
        # Idempotent: create_tables is a no-op when the table already exists.
        self.db.create_tables([Model])

    def get_all_models(self) -> list[ModelModel]:
        """Return every model row in the table."""
        return [ModelModel(**model_to_dict(model)) for model in Model.select()]

    def get_all_models_by_source(self, source: str) -> list[ModelModel]:
        """Return all rows whose ``source`` matches (e.g. ollama/openai/litellm)."""
        return [
            ModelModel(**model_to_dict(model))
            for model in Model.select().where(Model.source == source)
        ]

    def update_all_models(self, models: list[ModelForm]) -> bool:
        """Replace the table contents with *models*.

        Rows present in *models* but not in the table are created, matching
        rows (same ``(id, source)`` key) are updated, and rows absent from
        *models* are deleted. All operations run in a single transaction.

        Returns:
            bool: True on success, False if any database operation failed.
        """
        try:
            with self.db.atomic():
                # Fetch current models from the database
                current_models = self.get_all_models()
                current_model_dict = {
                    (model.id, model.source): model for model in current_models
                }

                # Key sets drive the create/update/delete partition below.
                current_model_keys = set(current_model_dict.keys())
                new_model_keys = {(model.id, model.source) for model in models}

                # Determine which models need to be created, updated, or deleted
                models_to_create = [
                    model
                    for model in models
                    if (model.id, model.source) not in current_model_keys
                ]
                models_to_update = [
                    model
                    for model in models
                    if (model.id, model.source) in current_model_keys
                ]
                models_to_delete = current_model_keys - new_model_keys

                # Perform the necessary database operations
                for model in models_to_create:
                    Model.create(**model.to_db_model().model_dump())

                for model in models_to_update:
                    # The dump includes id/source, but the WHERE clause pins the
                    # row, so rewriting the key columns to themselves is harmless.
                    Model.update(**model.to_db_model().model_dump()).where(
                        (Model.id == model.id) & (Model.source == model.source)
                    ).execute()

                for model_id, model_source in models_to_delete:
                    Model.delete().where(
                        (Model.id == model_id) & (Model.source == model_source)
                    ).execute()

            return True
        except Exception:
            # Callers only see a bool; log the traceback so a failed update
            # isn't silently swallowed.
            logging.exception("Failed to update models")
            return False


Models = ModelsTable(DB)
......@@ -549,10 +549,6 @@ WEBHOOK_URL = PersistentConfig(
ENABLE_ADMIN_EXPORT = os.environ.get("ENABLE_ADMIN_EXPORT", "True").lower() == "true"
MODEL_CONFIG = PersistentConfig(
"CONFIG_DATA", "models", {"ollama": [], "litellm": [], "openai": []}
)
####################################
# WEBUI_SECRET_KEY
####################################
......
......@@ -35,9 +35,9 @@ from apps.web.main import app as webui_app
import asyncio
from pydantic import BaseModel
from typing import List
from typing import List, Optional
from apps.web.models.models import Models, ModelModel, ModelForm
from utils.utils import get_admin_user
from apps.rag.utils import rag_messages
......@@ -59,7 +59,6 @@ from config import (
SRC_LOG_LEVELS,
WEBHOOK_URL,
ENABLE_ADMIN_EXPORT,
MODEL_CONFIG,
AppConfig,
)
from constants import ERROR_MESSAGES
......@@ -113,7 +112,7 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.config.MODEL_CONFIG = MODEL_CONFIG
app.state.MODEL_CONFIG = [model.to_form() for model in Models.get_all_models()]
app.state.config.WEBHOOK_URL = WEBHOOK_URL
......@@ -310,43 +309,40 @@ async def update_model_filter_config(
}
class ModelConfig(BaseModel):
id: str
name: str
description: str
vision_capable: bool
class SetModelConfigForm(BaseModel):
ollama: List[ModelConfig]
litellm: List[ModelConfig]
openai: List[ModelConfig]
models: List[ModelForm]
@app.post("/api/config/models")
async def update_model_config(
form_data: SetModelConfigForm, user=Depends(get_admin_user)
):
data = form_data.model_dump()
if not Models.update_all_models(form_data.models):
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ERROR_MESSAGES.DEFAULT("Failed to update model config"),
)
ollama_app.state.MODEL_CONFIG = data.get("ollama", [])
ollama_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "ollama"
]
openai_app.state.MODEL_CONFIG = data.get("openai", [])
openai_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "openai"
]
litellm_app.state.MODEL_CONFIG = data.get("litellm", [])
litellm_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "litellm"
]
app.state.config.MODEL_CONFIG = {
"ollama": ollama_app.state.MODEL_CONFIG,
"openai": openai_app.state.MODEL_CONFIG,
"litellm": litellm_app.state.MODEL_CONFIG,
}
app.state.MODEL_CONFIG = [model for model in form_data.models]
return {"models": app.state.config.MODEL_CONFIG}
return {"models": app.state.MODEL_CONFIG}
@app.get("/api/config/models")
async def get_model_config(user=Depends(get_admin_user)):
return {"models": app.state.config.MODEL_CONFIG}
return {"models": app.state.MODEL_CONFIG}
@app.get("/api/webhook")
......
......@@ -226,16 +226,18 @@ export const getModelConfig = async (token: string): Promise<GlobalModelConfig>
export interface ModelConfig {
id: string;
name?: string;
name: string;
source: string;
base_model?: string;
params: ModelParams;
}
export interface ModelParams {
description?: string;
vision_capable?: boolean;
}
export interface GlobalModelConfig {
ollama: ModelConfig[];
litellm: ModelConfig[];
openai: ModelConfig[];
}
export type GlobalModelConfig = ModelConfig[];
export const updateModelConfig = async (token: string, config: GlobalModelConfig) => {
let error = null;
......@@ -246,7 +248,9 @@ export const updateModelConfig = async (token: string, config: GlobalModelConfig
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`
},
body: JSON.stringify(config)
body: JSON.stringify({
models: config
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
......
......@@ -34,7 +34,7 @@ export const getLiteLLMModels = async (token: string = '') => {
name: model.name ?? model.id,
external: true,
source: 'LiteLLM',
custom_info: model.custom_info ?? {}
custom_info: model.custom_info
}))
.sort((a, b) => {
return a.name.localeCompare(b.name);
......
......@@ -234,7 +234,7 @@ export const getOpenAIModels = async (token: string = '') => {
id: model.id,
name: model.name ?? model.id,
external: true,
custom_info: model.custom_info ?? {}
custom_info: model.custom_info
}))
.sort((a, b) => {
return a.name.localeCompare(b.name);
......
<script lang="ts">
import { toast } from 'svelte-sonner';
import { onMount, tick, getContext } from 'svelte';
import { type Model, mobile, modelfiles, settings, showSidebar } from '$lib/stores';
import { type Model, mobile, modelfiles, settings, showSidebar, models } from '$lib/stores';
import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils';
import {
......@@ -27,7 +27,8 @@
export let stopResponse: Function;
export let autoScroll = true;
export let selectedModel: Model | undefined;
export let selectedAtModel: Model | undefined;
export let selectedModels: [''];
let chatTextAreaElement: HTMLTextAreaElement;
let filesInputElement;
......@@ -52,6 +53,8 @@
let speechRecognition;
let visionCapableState = 'all';
$: if (prompt) {
if (chatTextAreaElement) {
chatTextAreaElement.style.height = '';
......@@ -59,6 +62,20 @@
}
}
$: {
if (selectedAtModel || selectedModels) {
visionCapableState = checkModelsAreVisionCapable();
if (visionCapableState === 'none') {
// Remove all image files
const fileCount = files.length;
files = files.filter((file) => file.type != 'image');
if (files.length < fileCount) {
toast.warning($i18n.t('All selected models do not support image input, removed images'));
}
}
}
}
let mediaRecorder;
let audioChunks = [];
let isRecording = false;
......@@ -326,6 +343,35 @@
}
};
const checkModelsAreVisionCapable = () => {
let modelsToCheck = [];
if (selectedAtModel !== undefined) {
modelsToCheck = [selectedAtModel.id];
} else {
modelsToCheck = selectedModels;
}
if (modelsToCheck.length == 0 || modelsToCheck[0] == '') {
return 'all';
}
let visionCapableCount = 0;
for (const modelName of modelsToCheck) {
const model = $models.find((m) => m.id === modelName);
if (!model) {
continue;
}
if (model.custom_info?.params.vision_capable ?? true) {
visionCapableCount++;
}
}
if (visionCapableCount == modelsToCheck.length) {
return 'all';
} else if (visionCapableCount == 0) {
return 'none';
} else {
return 'some';
}
};
onMount(() => {
window.setTimeout(() => chatTextAreaElement?.focus(), 0);
......@@ -358,11 +404,9 @@
inputFiles.forEach((file) => {
console.log(file, file.name.split('.').at(-1));
if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) {
if (selectedModel !== undefined) {
if (!(selectedModel.custom_info?.vision_capable ?? true)) {
toast.error($i18n.t('Selected model does not support image inputs.'));
return;
}
if (visionCapableState == 'none') {
toast.error($i18n.t('Selected models do not support image inputs'));
return;
}
let reader = new FileReader();
reader.onload = (event) => {
......@@ -500,12 +544,12 @@
bind:chatInputPlaceholder
{messages}
on:select={(e) => {
selectedModel = e.detail;
selectedAtModel = e.detail;
chatTextAreaElement?.focus();
}}
/>
{#if selectedModel !== undefined}
{#if selectedAtModel !== undefined}
<div
class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900"
>
......@@ -514,7 +558,7 @@
crossorigin="anonymous"
alt="model profile"
class="size-5 max-w-[28px] object-cover rounded-full"
src={$modelfiles.find((modelfile) => modelfile.tagName === selectedModel.id)
src={$modelfiles.find((modelfile) => modelfile.tagName === selectedAtModel.id)
?.imageUrl ??
($i18n.language === 'dg-DG'
? `/doge.png`
......@@ -522,7 +566,7 @@
/>
<div>
Talking to <span class=" font-medium"
>{selectedModel.custom_info?.name ?? selectedModel.name}
>{selectedAtModel.custom_info?.name ?? selectedAtModel.name}
</span>
</div>
</div>
......@@ -530,7 +574,7 @@
<button
class="flex items-center"
on:click={() => {
selectedModel = undefined;
selectedAtModel = undefined;
}}
>
<XMark />
......@@ -556,13 +600,11 @@
const _inputFiles = Array.from(inputFiles);
_inputFiles.forEach((file) => {
if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) {
if (selectedModel !== undefined) {
if (!(selectedModel.custom_info?.vision_capable ?? true)) {
toast.error($i18n.t('Selected model does not support image inputs.'));
inputFiles = null;
filesInputElement.value = '';
return;
}
if (visionCapableState === 'none') {
toast.error($i18n.t('Selected models do not support image inputs'));
inputFiles = null;
filesInputElement.value = '';
return;
}
let reader = new FileReader();
reader.onload = (event) => {
......@@ -897,7 +939,7 @@
if (e.key === 'Escape') {
console.log('Escape');
selectedModel = undefined;
selectedAtModel = undefined;
}
}}
rows="1"
......
......@@ -12,7 +12,12 @@
import { user, MODEL_DOWNLOAD_POOL, models, mobile } from '$lib/stores';
import { toast } from 'svelte-sonner';
import { capitalizeFirstLetter, getModels, splitStream } from '$lib/utils';
import {
capitalizeFirstLetter,
getModels,
sanitizeResponseContent,
splitStream
} from '$lib/utils';
import Tooltip from '$lib/components/common/Tooltip.svelte';
const i18n = getContext('i18n');
......@@ -23,7 +28,12 @@
export let searchEnabled = true;
export let searchPlaceholder = $i18n.t('Search a model');
export let items = [{ value: 'mango', label: 'Mango' }];
export let items: {
label: string;
value: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
[key: string]: any;
} = [];
export let className = ' w-[30rem]';
......@@ -248,12 +258,8 @@
<!-- {JSON.stringify(item.info)} -->
{#if item.info.external}
<Tooltip
content={`${item.info?.source ?? 'External'}${
item.info.custom_info?.description ? '<br>' : ''
}${item.info.custom_info?.description?.replaceAll('\n', '<br>') ?? ''}`}
>
<div class=" mr-2">
<Tooltip content={`${item.info?.source ?? 'External'}`}>
<div class="">
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
......@@ -279,11 +285,9 @@
item.info?.details?.quantization_level
? item.info?.details?.quantization_level + ' '
: ''
}${item.info.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}${
item.info.custom_info?.description ? '<br>' : ''
}${item.info.custom_info?.description?.replaceAll('\n', '<br>') ?? ''}`}
}${item.info.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}
>
<div class=" mr-2">
<div class="">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
......@@ -301,8 +305,31 @@
</div>
</Tooltip>
{/if}
{#if item.info?.custom_info?.params.description}
<Tooltip
content={`${sanitizeResponseContent(
item.info.custom_info?.params.description
).replaceAll('\n', '<br>')}`}
>
<div class="">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="w-4 h-4"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M9.879 7.519c1.171-1.025 3.071-1.025 4.242 0 1.172 1.025 1.172 2.687 0 3.712-.203.179-.43.326-.67.442-.745.361-1.45.999-1.45 1.827v.75M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Zm-9 5.25h.008v.008H12v-.008Z"
/>
</svg>
</div>
</Tooltip>
{/if}
</div>
{#if value === item.value}
<div class="ml-auto">
<Check />
......
......@@ -80,8 +80,8 @@
const model = $models.find((m) => m.id === selectedModelId);
if (model) {
modelName = model.custom_info?.name ?? model.name;
modelDescription = model.custom_info?.description ?? '';
modelIsVisionCapable = model.custom_info?.vision_capable ?? false;
modelDescription = model.custom_info?.params.description ?? '';
modelIsVisionCapable = model.custom_info?.params.vision_capable ?? false;
}
};
......@@ -521,13 +521,18 @@
const modelSource =
'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai';
// Remove any existing config
modelConfig[modelSource] = modelConfig[modelSource].filter((m) => m.id !== selectedModelId);
modelConfig = modelConfig.filter(
(m) => !(m.id === selectedModelId && m.source === modelSource)
);
// Add new config
modelConfig[modelSource].push({
modelConfig.push({
id: selectedModelId,
name: modelName,
description: modelDescription,
vision_capable: modelIsVisionCapable
source: modelSource,
params: {
description: modelDescription,
vision_capable: modelIsVisionCapable
}
});
await updateModelConfig(localStorage.token, modelConfig);
toast.success(
......@@ -546,7 +551,9 @@
}
const modelSource =
'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai';
modelConfig[modelSource] = modelConfig[modelSource].filter((m) => m.id !== selectedModelId);
modelConfig = modelConfig.filter(
(m) => !(m.id === selectedModelId && m.source === modelSource)
);
await updateModelConfig(localStorage.token, modelConfig);
toast.success(
$i18n.t('Model info for {{modelName}} deleted successfully', { modelName: selectedModelId })
......@@ -559,18 +566,28 @@
};
onMount(async () => {
OLLAMA_URLS = await getOllamaUrls(localStorage.token).catch((error) => {
toast.error(error);
return [];
});
if (OLLAMA_URLS.length > 0) {
selectedOllamaUrlIdx = 0;
}
console.log('mounting');
await Promise.all([
(async () => {
OLLAMA_URLS = await getOllamaUrls(localStorage.token).catch((error) => {
toast.error(error);
return [];
});
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
modelConfig = await getModelConfig(localStorage.token);
ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
if (OLLAMA_URLS.length > 0) {
selectedOllamaUrlIdx = 0;
}
})(),
(async () => {
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
})(),
(async () => {
modelConfig = await getModelConfig(localStorage.token);
})(),
(async () => {
ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
})()
]);
});
</script>
......
......@@ -325,7 +325,7 @@
.filter((model) => model.name !== 'hr')
.map((model) => ({
value: model.id,
label: model.name,
label: model.custom_info?.name ?? model.name,
info: model
}))}
bind:value={selectedModelId}
......
......@@ -29,6 +29,7 @@
"Advanced Parameters": "التعليمات المتقدمة",
"all": "الكل",
"All Documents": "جميع الملفات",
"All selected models do not support image input, removed images": "",
"All Users": "جميع المستخدمين",
"Allow": "يسمح",
"Allow Chat Deletion": "يستطيع حذف المحادثات",
......@@ -392,7 +393,7 @@
"Select a model": "أختار الموديل",
"Select an Ollama instance": "أختار سيرفر ",
"Select model": " أختار موديل",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "يُرجى إدخال طلبك هنا",
"Send message": "يُرجى إدخال طلبك هنا.",
......
......@@ -29,6 +29,7 @@
"Advanced Parameters": "Разширени Параметри",
"all": "всички",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Всички Потребители",
"Allow": "Позволи",
"Allow Chat Deletion": "Позволи Изтриване на Чат",
......@@ -392,7 +393,7 @@
"Select a model": "Изберете модел",
"Select an Ollama instance": "Изберете Ollama инстанция",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Изпращане на Съобщение",
"Send message": "Изпращане на съобщение",
......
......@@ -29,6 +29,7 @@
"Advanced Parameters": "এডভান্সড প্যারামিটার্স",
"all": "সব",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "সব ইউজার",
"Allow": "অনুমোদন",
"Allow Chat Deletion": "চ্যাট ডিলিট করতে দিন",
......@@ -392,7 +393,7 @@
"Select a model": "একটি মডেল নির্বাচন করুন",
"Select an Ollama instance": "একটি Ollama ইন্সট্যান্স নির্বাচন করুন",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "একটি মেসেজ পাঠান",
"Send message": "মেসেজ পাঠান",
......
......@@ -29,6 +29,7 @@
"Advanced Parameters": "Paràmetres Avançats",
"all": "tots",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Tots els Usuaris",
"Allow": "Permet",
"Allow Chat Deletion": "Permet la Supressió del Xat",
......@@ -392,7 +393,7 @@
"Select a model": "Selecciona un model",
"Select an Ollama instance": "Selecciona una instància d'Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Envia un Missatge",
"Send message": "Envia missatge",
......
......@@ -29,6 +29,7 @@
"Advanced Parameters": "Erweiterte Parameter",
"all": "Alle",
"All Documents": "Alle Dokumente",
"All selected models do not support image input, removed images": "",
"All Users": "Alle Benutzer",
"Allow": "Erlauben",
"Allow Chat Deletion": "Chat Löschung erlauben",
......@@ -392,7 +393,7 @@
"Select a model": "Ein Modell auswählen",
"Select an Ollama instance": "Eine Ollama Instanz auswählen",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Eine Nachricht senden",
"Send message": "Nachricht senden",
......
......@@ -29,6 +29,7 @@
"Advanced Parameters": "Advanced Parameters",
"all": "all",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "All Users",
"Allow": "Allow",
"Allow Chat Deletion": "Allow Delete Chats",
......@@ -392,7 +393,7 @@
"Select a model": "Select a model much choice",
"Select an Ollama instance": "Select an Ollama instance very choose",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Send a Message much message",
"Send message": "Send message very send",
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment