Merge pull request #1362 from open-webui/dev

0.1.116
Timothy Jaeryang Baek 2024-03-31 01:17:32 -07:00 committed by GitHub
commit 3b0cb7945f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
57 changed files with 2096 additions and 754 deletions


@ -25,3 +25,5 @@ jobs:
pip install yapf
- name: Format backend
run: bun run format:backend
- name: Check for changes after format
run: git diff --exit-code


@ -18,5 +18,8 @@ jobs:
run: bun install
- name: Format frontend
run: bun run format
- name: Check for changes after format
run: git diff --exit-code
- name: Build frontend
if: always()
run: bun run build


@ -5,6 +5,24 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.1.116] - 2024-03-31
### Added
- **🔄 Enhanced UI**: Model selector now conveniently located in the navbar, enabling seamless switching between multiple models during conversations.
- **🔍 Improved Model Selector**: Pull a model directly from the selector; models now display detailed information for better understanding.
- **💬 Webhook Support**: Now compatible with Google Chat and Microsoft Teams.
- **🌐 Localization**: Korean translation (I18n) now available.
- **🌑 Dark Theme**: OLED dark theme introduced for reduced eye strain during prolonged usage.
- **🏷️ Tag Autocomplete**: Dropdown feature added for effortless chat tagging.
### Fixed
- **🔽 Auto-Scrolling**: Addressed OpenAI auto-scrolling issue.
- **🏷️ Tag Validation**: Implemented tag validation to prevent empty string tags.
- **🚫 Model Whitelisting**: Resolved LiteLLM model whitelisting issue.
- **✅ Spelling**: Corrected various spelling issues for improved readability.
## [0.1.115] - 2024-03-24
### Added


@ -22,7 +22,13 @@ from utils.utils import (
)
from utils.misc import calculate_sha256
from config import SRC_LOG_LEVELS, CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL, WHISPER_MODEL_DIR
from config import (
SRC_LOG_LEVELS,
CACHE_DIR,
UPLOAD_DIR,
WHISPER_MODEL,
WHISPER_MODEL_DIR,
)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["AUDIO"])


@ -33,7 +33,13 @@ from constants import ERROR_MESSAGES
from utils.utils import decode_token, get_current_user, get_admin_user
from config import SRC_LOG_LEVELS, OLLAMA_BASE_URLS, MODEL_FILTER_ENABLED, MODEL_FILTER_LIST, UPLOAD_DIR
from config import (
SRC_LOG_LEVELS,
OLLAMA_BASE_URLS,
MODEL_FILTER_ENABLED,
MODEL_FILTER_LIST,
UPLOAD_DIR,
)
from utils.misc import calculate_sha256
log = logging.getLogger(__name__)
@ -770,7 +776,11 @@ async def generate_chat_completion(
r = None
log.debug("form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(form_data.model_dump_json(exclude_none=True).encode()))
log.debug(
"form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
form_data.model_dump_json(exclude_none=True).encode()
)
)
def get_request():
nonlocal form_data


@ -21,6 +21,7 @@ from langchain_community.document_loaders import (
TextLoader,
PyPDFLoader,
CSVLoader,
BSHTMLLoader,
Docx2txtLoader,
UnstructuredEPubLoader,
UnstructuredWordDocumentLoader,
@ -114,6 +115,7 @@ class CollectionNameForm(BaseModel):
class StoreWebForm(CollectionNameForm):
url: str
@app.get("/")
async def get_status():
return {
@ -296,13 +298,18 @@ def store_web(form_data: StoreWebForm, user=Depends(get_current_user)):
def store_data_in_vector_db(data, collection_name, overwrite: bool = False) -> bool:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=app.state.CHUNK_SIZE,
chunk_overlap=app.state.CHUNK_OVERLAP,
add_start_index=True,
)
docs = text_splitter.split_documents(data)
return store_docs_in_vector_db(docs, collection_name, overwrite)
if len(docs) > 0:
return store_docs_in_vector_db(docs, collection_name, overwrite), None
else:
raise ValueError(ERROR_MESSAGES.EMPTY_CONTENT)
def store_text_in_vector_db(
@ -318,6 +325,7 @@ def store_text_in_vector_db(
def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> bool:
texts = [doc.page_content for doc in docs]
metadatas = [doc.metadata for doc in docs]
@ -402,6 +410,8 @@ def get_loader(filename: str, file_content_type: str, file_path: str):
loader = UnstructuredRSTLoader(file_path, mode="elements")
elif file_ext == "xml":
loader = UnstructuredXMLLoader(file_path)
elif file_ext in ["htm", "html"]:
loader = BSHTMLLoader(file_path, open_encoding="unicode_escape")
elif file_ext == "md":
loader = UnstructuredMarkdownLoader(file_path)
elif file_content_type == "application/epub+zip":
@ -452,6 +462,8 @@ def store_doc(
loader, known_type = get_loader(file.filename, file.content_type, file_path)
data = loader.load()
try:
result = store_data_in_vector_db(data, collection_name)
if result:
@ -461,10 +473,10 @@ def store_doc(
"filename": filename,
"known_type": known_type,
}
else:
except Exception as e:
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ERROR_MESSAGES.DEFAULT(),
detail=e,
)
except Exception as e:
log.exception(e)
@ -529,6 +541,7 @@ def scan_docs_dir(user=Depends(get_admin_user)):
)
data = loader.load()
try:
result = store_data_in_vector_db(data, collection_name)
if result:
@ -561,6 +574,9 @@ def scan_docs_dir(user=Depends(get_admin_user)):
}
),
)
except Exception as e:
print(e)
pass
except Exception as e:
log.exception(e)
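
The hunks above add HTML ingestion via BSHTMLLoader and a guard that rejects empty content before indexing. A minimal sketch of that flow, assuming langchain-community and beautifulsoup4 are installed; the file path and chunk sizes below are placeholders, not values from this commit:

# Import path for the splitter may vary across langchain versions
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import BSHTMLLoader

# Load an HTML file the same way the new "htm"/"html" branch does
loader = BSHTMLLoader("example.html", open_encoding="unicode_escape")
data = loader.load()

# Split into chunks; add_start_index records each chunk's offset in the source document
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1500, chunk_overlap=100, add_start_index=True
)
docs = text_splitter.split_documents(data)

if len(docs) == 0:
    # mirrors the new guard: empty content is rejected before it reaches the vector DB
    raise ValueError("The content provided is empty.")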


@ -11,6 +11,7 @@ from utils.utils import verify_password
from apps.web.internal.db import DB
from config import SRC_LOG_LEVELS
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])


@ -13,6 +13,7 @@ from apps.web.internal.db import DB
import json
from config import SRC_LOG_LEVELS
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])


@ -64,8 +64,8 @@ class ModelfilesTable:
self.db.create_tables([Modelfile])
def insert_new_modelfile(
self, user_id: str,
form_data: ModelfileForm) -> Optional[ModelfileModel]:
self, user_id: str, form_data: ModelfileForm
) -> Optional[ModelfileModel]:
if "tagName" in form_data.modelfile:
modelfile = ModelfileModel(
**{
@ -73,7 +73,8 @@ class ModelfilesTable:
"tag_name": form_data.modelfile["tagName"],
"modelfile": json.dumps(form_data.modelfile),
"timestamp": int(time.time()),
})
}
)
try:
result = Modelfile.create(**modelfile.model_dump())
@ -87,29 +88,28 @@ class ModelfilesTable:
else:
return None
def get_modelfile_by_tag_name(self,
tag_name: str) -> Optional[ModelfileModel]:
def get_modelfile_by_tag_name(self, tag_name: str) -> Optional[ModelfileModel]:
try:
modelfile = Modelfile.get(Modelfile.tag_name == tag_name)
return ModelfileModel(**model_to_dict(modelfile))
except:
return None
def get_modelfiles(self,
skip: int = 0,
limit: int = 50) -> List[ModelfileResponse]:
def get_modelfiles(self, skip: int = 0, limit: int = 50) -> List[ModelfileResponse]:
return [
ModelfileResponse(
**{
**model_to_dict(modelfile),
"modelfile":
json.loads(modelfile.modelfile),
}) for modelfile in Modelfile.select()
"modelfile": json.loads(modelfile.modelfile),
}
)
for modelfile in Modelfile.select()
# .limit(limit).offset(skip)
]
def update_modelfile_by_tag_name(
self, tag_name: str, modelfile: dict) -> Optional[ModelfileModel]:
self, tag_name: str, modelfile: dict
) -> Optional[ModelfileModel]:
try:
query = Modelfile.update(
modelfile=json.dumps(modelfile),


@ -52,8 +52,9 @@ class PromptsTable:
self.db = db
self.db.create_tables([Prompt])
def insert_new_prompt(self, user_id: str,
form_data: PromptForm) -> Optional[PromptModel]:
def insert_new_prompt(
self, user_id: str, form_data: PromptForm
) -> Optional[PromptModel]:
prompt = PromptModel(
**{
"user_id": user_id,
@ -61,7 +62,8 @@ class PromptsTable:
"title": form_data.title,
"content": form_data.content,
"timestamp": int(time.time()),
})
}
)
try:
result = Prompt.create(**prompt.model_dump())
@ -81,13 +83,14 @@ class PromptsTable:
def get_prompts(self) -> List[PromptModel]:
return [
PromptModel(**model_to_dict(prompt)) for prompt in Prompt.select()
PromptModel(**model_to_dict(prompt))
for prompt in Prompt.select()
# .limit(limit).offset(skip)
]
def update_prompt_by_command(
self, command: str,
form_data: PromptForm) -> Optional[PromptModel]:
self, command: str, form_data: PromptForm
) -> Optional[PromptModel]:
try:
query = Prompt.update(
title=form_data.title,


@ -11,6 +11,7 @@ import logging
from apps.web.internal.db import DB
from config import SRC_LOG_LEVELS
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])


@ -29,6 +29,7 @@ from apps.web.models.tags import (
from constants import ERROR_MESSAGES
from config import SRC_LOG_LEVELS
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])


@ -10,7 +10,12 @@ import uuid
from apps.web.models.users import Users
from utils.utils import get_password_hash, get_current_user, get_admin_user, create_token
from utils.utils import (
get_password_hash,
get_current_user,
get_admin_user,
create_token,
)
from utils.misc import get_gravatar_url, validate_email_format
from constants import ERROR_MESSAGES
@ -43,7 +48,6 @@ async def set_global_default_models(
return request.app.state.DEFAULT_MODELS
@router.post("/default/suggestions", response_model=List[PromptSuggestion])
async def set_global_default_suggestions(
request: Request,


@ -24,9 +24,9 @@ router = APIRouter()
@router.get("/", response_model=List[ModelfileResponse])
async def get_modelfiles(skip: int = 0,
limit: int = 50,
user=Depends(get_current_user)):
async def get_modelfiles(
skip: int = 0, limit: int = 50, user=Depends(get_current_user)
):
return Modelfiles.get_modelfiles(skip, limit)
@ -36,17 +36,16 @@ async def get_modelfiles(skip: int = 0,
@router.post("/create", response_model=Optional[ModelfileResponse])
async def create_new_modelfile(form_data: ModelfileForm,
user=Depends(get_admin_user)):
async def create_new_modelfile(form_data: ModelfileForm, user=Depends(get_admin_user)):
modelfile = Modelfiles.insert_new_modelfile(user.id, form_data)
if modelfile:
return ModelfileResponse(
**{
**modelfile.model_dump(),
"modelfile":
json.loads(modelfile.modelfile),
})
"modelfile": json.loads(modelfile.modelfile),
}
)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
@ -60,17 +59,18 @@ async def create_new_modelfile(form_data: ModelfileForm,
@router.post("/", response_model=Optional[ModelfileResponse])
async def get_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
user=Depends(get_current_user)):
async def get_modelfile_by_tag_name(
form_data: ModelfileTagNameForm, user=Depends(get_current_user)
):
modelfile = Modelfiles.get_modelfile_by_tag_name(form_data.tag_name)
if modelfile:
return ModelfileResponse(
**{
**modelfile.model_dump(),
"modelfile":
json.loads(modelfile.modelfile),
})
"modelfile": json.loads(modelfile.modelfile),
}
)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
@ -84,8 +84,9 @@ async def get_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
@router.post("/update", response_model=Optional[ModelfileResponse])
async def update_modelfile_by_tag_name(form_data: ModelfileUpdateForm,
user=Depends(get_admin_user)):
async def update_modelfile_by_tag_name(
form_data: ModelfileUpdateForm, user=Depends(get_admin_user)
):
modelfile = Modelfiles.get_modelfile_by_tag_name(form_data.tag_name)
if modelfile:
updated_modelfile = {
@ -94,14 +95,15 @@ async def update_modelfile_by_tag_name(form_data: ModelfileUpdateForm,
}
modelfile = Modelfiles.update_modelfile_by_tag_name(
form_data.tag_name, updated_modelfile)
form_data.tag_name, updated_modelfile
)
return ModelfileResponse(
**{
**modelfile.model_dump(),
"modelfile":
json.loads(modelfile.modelfile),
})
"modelfile": json.loads(modelfile.modelfile),
}
)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
@ -115,7 +117,8 @@ async def update_modelfile_by_tag_name(form_data: ModelfileUpdateForm,
@router.delete("/delete", response_model=bool)
async def delete_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
user=Depends(get_admin_user)):
async def delete_modelfile_by_tag_name(
form_data: ModelfileTagNameForm, user=Depends(get_admin_user)
):
result = Modelfiles.delete_modelfile_by_tag_name(form_data.tag_name)
return result


@ -16,6 +16,7 @@ from utils.utils import get_current_user, get_password_hash, get_admin_user
from constants import ERROR_MESSAGES
from config import SRC_LOG_LEVELS
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])


@ -26,6 +26,7 @@ except ImportError:
log.warning("dotenv not installed, skipping...")
WEBUI_NAME = "Open WebUI"
WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
shutil.copyfile("../build/favicon.png", "./static/favicon.png")
####################################
@ -116,7 +117,18 @@ else:
log = logging.getLogger(__name__)
log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
log_sources = ["AUDIO", "CONFIG", "DB", "IMAGES", "LITELLM", "MAIN", "MODELS", "OLLAMA", "OPENAI", "RAG"]
log_sources = [
"AUDIO",
"CONFIG",
"DB",
"IMAGES",
"LITELLM",
"MAIN",
"MODELS",
"OLLAMA",
"OPENAI",
"RAG",
]
SRC_LOG_LEVELS = {}
@ -141,7 +153,7 @@ if CUSTOM_NAME:
data = r.json()
if r.ok:
if "logo" in data:
url = (
WEBUI_FAVICON_URL = url = (
f"https://api.openwebui.com{data['logo']}"
if data["logo"][0] == "/"
else data["logo"]
@ -238,7 +250,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
)
OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
K8S_FLAG = os.environ.get("K8S_FLAG", "")
if OLLAMA_BASE_URL == "" and OLLAMA_API_BASE_URL != "":
OLLAMA_BASE_URL = (
@ -251,6 +263,9 @@ if ENV == "prod":
if OLLAMA_BASE_URL == "/ollama":
OLLAMA_BASE_URL = "http://host.docker.internal:11434"
elif K8S_FLAG:
OLLAMA_BASE_URL = "http://ollama-service.open-webui.svc.cluster.local:11434"
OLLAMA_BASE_URLS = os.environ.get("OLLAMA_BASE_URLS", "")
OLLAMA_BASE_URLS = OLLAMA_BASE_URLS if OLLAMA_BASE_URLS != "" else OLLAMA_BASE_URL
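
The new K8S_FLAG branch above resolves the Ollama endpoint roughly as in this sketch; the environment values are illustrative only, and in the real config the result also feeds OLLAMA_BASE_URLS:

import os

# Illustrative environment for an in-cluster deployment
os.environ["ENV"] = "prod"
os.environ["K8S_FLAG"] = "true"
os.environ["OLLAMA_BASE_URL"] = ""

OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
K8S_FLAG = os.environ.get("K8S_FLAG", "")

if os.environ.get("ENV") == "prod":
    if OLLAMA_BASE_URL == "/ollama":
        # Docker: reach Ollama on the host machine
        OLLAMA_BASE_URL = "http://host.docker.internal:11434"
    elif K8S_FLAG:
        # Kubernetes: reach the in-cluster Ollama service
        OLLAMA_BASE_URL = "http://ollama-service.open-webui.svc.cluster.local:11434"

print(OLLAMA_BASE_URL)  # http://ollama-service.open-webui.svc.cluster.local:11434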


@ -60,3 +60,5 @@ class ERROR_MESSAGES(str, Enum):
MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"
OPENAI_NOT_FOUND = lambda name="": f"OpenAI API was not found"
OLLAMA_NOT_FOUND = "WebUI could not connect to Ollama"
EMPTY_CONTENT = "The content provided is empty. Please ensure that there is text or data present before proceeding."


@ -1,6 +1,7 @@
{
"version": 0,
"ui": {
"default_locale": "en-US",
"prompt_suggestions": [
{
"title": [


@ -32,6 +32,7 @@ from utils.utils import get_admin_user
from apps.rag.utils import rag_messages
from config import (
CONFIG_DATA,
WEBUI_NAME,
ENV,
VERSION,
@ -49,6 +50,7 @@ logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
class SPAStaticFiles(StaticFiles):
async def get_response(self, path: str, scope):
try:
@ -88,7 +90,6 @@ class RAGMiddleware(BaseHTTPMiddleware):
# Example: Add a new key-value pair or modify existing ones
# data["modified"] = True # Example modification
if "docs" in data:
data = {**data}
data["messages"] = rag_messages(
data["docs"],
@ -163,11 +164,15 @@ app.mount("/rag/api/v1", rag_app)
@app.get("/api/config")
async def get_app_config():
return {
"status": True,
"name": WEBUI_NAME,
"version": VERSION,
"default_locale": (
CONFIG_DATA["ui"]["default_locale"]
if "default_locale" in CONFIG_DATA["ui"]
else "en-US"
),
"images": images_app.state.ENABLED,
"default_models": webui_app.state.DEFAULT_MODELS,
"default_prompt_suggestions": webui_app.state.DEFAULT_PROMPT_SUGGESTIONS,
@ -191,7 +196,6 @@ class ModelFilterConfigForm(BaseModel):
async def update_model_filter_config(
form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
app.state.MODEL_FILTER_ENABLED = form_data.enabled
app.state.MODEL_FILTER_LIST = form_data.models
@ -201,6 +205,9 @@ async def update_model_filter_config(
openai_app.state.MODEL_FILTER_ENABLED = app.state.MODEL_FILTER_ENABLED
openai_app.state.MODEL_FILTER_LIST = app.state.MODEL_FILTER_LIST
litellm_app.state.MODEL_FILTER_ENABLED = app.state.MODEL_FILTER_ENABLED
litellm_app.state.MODEL_FILTER_LIST = app.state.MODEL_FILTER_LIST
return {
"enabled": app.state.MODEL_FILTER_ENABLED,
"models": app.state.MODEL_FILTER_LIST,
@ -231,7 +238,6 @@ async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
@app.get("/api/version")
async def get_app_config():
return {
"version": VERSION,
}
@ -239,7 +245,7 @@ async def get_app_config():
@app.get("/api/changelog")
async def get_app_changelog():
return CHANGELOG
return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
@app.get("/api/version/updates")


@ -1,14 +1,41 @@
import json
import requests
from config import VERSION, WEBUI_FAVICON_URL, WEBUI_NAME
def post_webhook(url: str, message: str, event_data: dict) -> bool:
try:
payload = {}
if "https://hooks.slack.com" in url:
# Slack and Google Chat Webhooks
if "https://hooks.slack.com" in url or "https://chat.googleapis.com" in url:
payload["text"] = message
# Discord Webhooks
elif "https://discord.com/api/webhooks" in url:
payload["content"] = message
# Microsoft Teams Webhooks
elif "webhook.office.com" in url:
action = event_data.get("action", "undefined")
facts = [
{"name": name, "value": value}
for name, value in json.loads(event_data.get("user", {})).items()
]
payload = {
"@type": "MessageCard",
"@context": "http://schema.org/extensions",
"themeColor": "0076D7",
"summary": message,
"sections": [
{
"activityTitle": message,
"activitySubtitle": f"{WEBUI_NAME} ({VERSION}) - {action}",
"activityImage": WEBUI_FAVICON_URL,
"facts": facts,
"markdown": True,
}
],
}
# Default Payload
else:
payload = {**event_data}
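
A hedged sketch of how the new Microsoft Teams branch could be exercised; the webhook URL and user record are placeholders, and the MessageCard is assembled here by hand to mirror the payload built above:

import json

import requests  # webhook.py imports requests; this sketch posts the card directly

teams_url = "https://example.webhook.office.com/webhookb2/placeholder"  # placeholder URL
message = "New user signed up"
event_data = {
    "action": "signup",
    # the Teams branch json.loads() this field, so the caller passes a JSON string
    "user": json.dumps({"name": "Jane Doe", "email": "jane@example.com"}),
}

facts = [{"name": k, "value": v} for k, v in json.loads(event_data["user"]).items()]
payload = {
    "@type": "MessageCard",
    "@context": "http://schema.org/extensions",
    "themeColor": "0076D7",
    "summary": message,
    "sections": [
        {
            "activityTitle": message,
            "activitySubtitle": f"Open WebUI (0.1.116) - {event_data['action']}",
            "activityImage": "https://openwebui.com/favicon.png",
            "facts": facts,
            "markdown": True,
        }
    ],
}
requests.post(teams_url, json=payload)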


@ -88,7 +88,7 @@ spec:
resources:
requests:
storage: {{ .Values.ollama.persistence.size | quote }}
storageClass: {{ .Values.ollama.persistence.storageClass }}
storageClassName: {{ .Values.ollama.persistence.storageClass }}
{{- with .Values.ollama.persistence.selector }}
selector:
{{- toYaml . | nindent 8 }}


@ -17,7 +17,7 @@ spec:
resources:
requests:
storage: {{ .Values.webui.persistence.size }}
storageClass: {{ .Values.webui.persistence.storageClass }}
storageClassName: {{ .Values.webui.persistence.storageClass }}
{{- with .Values.webui.persistence.selector }}
selector:
{{- toYaml . | nindent 4 }}


@ -35,4 +35,4 @@ spec:
volumes:
- name: webui-volume
persistentVolumeClaim:
claimName: ollama-webui-pvc
claimName: open-webui-pvc


@ -2,8 +2,8 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
app: ollama-webui
name: ollama-webui-pvc
app: open-webui
name: open-webui-pvc
namespace: open-webui
spec:
accessModes: ["ReadWriteOnce"]


@ -1,6 +1,6 @@
{
"name": "open-webui",
"version": "0.1.115",
"version": "0.1.116",
"private": true,
"scripts": {
"dev": "vite dev --host",


@ -9,7 +9,11 @@
<script>
// On page load or when changing themes, best to add inline in `head` to avoid FOUC
(() => {
if (
if (localStorage?.theme && localStorage?.theme.includes('oled')) {
document.documentElement.style.setProperty('--color-gray-900', '#000000');
document.documentElement.style.setProperty('--color-gray-950', '#000000');
document.documentElement.classList.add('dark');
} else if (
localStorage.theme === 'light' ||
(!('theme' in localStorage) && window.matchMedia('(prefers-color-scheme: light)').matches)
) {


@ -33,7 +33,7 @@ export const getLiteLLMModels = async (token: string = '') => {
id: model.id,
name: model.name ?? model.id,
external: true,
source: 'litellm'
source: 'LiteLLM'
}))
.sort((a, b) => {
return a.name.localeCompare(b.name);


@ -263,3 +263,53 @@ export const synthesizeOpenAISpeech = async (
return res;
};
export const generateTitle = async (
token: string = '',
template: string,
model: string,
prompt: string,
url: string = OPENAI_API_BASE_URL
) => {
let error = null;
template = template.replace(/{{prompt}}/g, prompt);
console.log(template);
const res = await fetch(`${url}/chat/completions`, {
method: 'POST',
headers: {
Accept: 'application/json',
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`
},
body: JSON.stringify({
model: model,
messages: [
{
role: 'user',
content: template
}
],
stream: false
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
if ('detail' in err) {
error = err.detail;
}
return null;
});
if (error) {
throw error;
}
return res?.choices[0]?.message?.content ?? 'New Chat';
};


@ -1,9 +1,12 @@
<script lang="ts">
import { Collapsible } from 'bits-ui';
import { setDefaultModels } from '$lib/apis/configs';
import { models, showSettings, settings, user } from '$lib/stores';
import { onMount, tick, getContext } from 'svelte';
import { toast } from 'svelte-sonner';
import Select from '../common/Select.svelte';
import Selector from './ModelSelector/Selector.svelte';
import Tooltip from '../common/Tooltip.svelte';
const i18n = getContext('i18n');
@ -33,19 +36,19 @@
}
</script>
<div class="flex flex-col my-2 w-full">
<div class="flex flex-col mt-0.5 w-full">
{#each selectedModels as selectedModel, selectedModelIdx}
<div class="flex w-full">
<div class="overflow-hidden w-full">
<div class="mr-2 max-w-full">
<Select
<div class="mr-0.5 max-w-full">
<Selector
placeholder={$i18n.t('Select a model')}
items={$models
.filter((model) => model.name !== 'hr')
.map((model) => ({
value: model.id,
label:
model.name + `${model.size ? ` (${(model.size / 1024 ** 3).toFixed(1)}GB)` : ''}`
label: model.name,
info: model
}))}
bind:value={selectedModel}
/>
@ -53,10 +56,10 @@
</div>
{#if selectedModelIdx === 0}
<div class=" self-center mr-2 disabled:text-gray-600 disabled:hover:text-gray-600">
<Tooltip content="Add Model">
<button
class=" self-center {selectedModelIdx === 0
? 'mr-3'
: 'mr-7'} disabled:text-gray-600 disabled:hover:text-gray-600"
class=" "
{disabled}
on:click={() => {
selectedModels = [...selectedModels, ''];
@ -73,12 +76,12 @@
<path stroke-linecap="round" stroke-linejoin="round" d="M12 6v12m6-6H6" />
</svg>
</button>
</Tooltip>
</div>
{:else}
<div class=" self-center disabled:text-gray-600 disabled:hover:text-gray-600 mr-2">
<Tooltip content="Remove Model">
<button
class=" self-center disabled:text-gray-600 disabled:hover:text-gray-600 {selectedModelIdx ===
0
? 'mr-3'
: 'mr-7'}"
{disabled}
on:click={() => {
selectedModels.splice(selectedModelIdx, 1);
@ -96,41 +99,13 @@
<path stroke-linecap="round" stroke-linejoin="round" d="M19.5 12h-15" />
</svg>
</button>
{/if}
{#if selectedModelIdx === 0}
<button
class=" self-center dark:hover:text-gray-300"
id="open-settings-button"
on:click={async () => {
await showSettings.set(!$showSettings);
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="w-4 h-4"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M10.343 3.94c.09-.542.56-.94 1.11-.94h1.093c.55 0 1.02.398 1.11.94l.149.894c.07.424.384.764.78.93.398.164.855.142 1.205-.108l.737-.527a1.125 1.125 0 011.45.12l.773.774c.39.389.44 1.002.12 1.45l-.527.737c-.25.35-.272.806-.107 1.204.165.397.505.71.93.78l.893.15c.543.09.94.56.94 1.109v1.094c0 .55-.397 1.02-.94 1.11l-.893.149c-.425.07-.765.383-.93.78-.165.398-.143.854.107 1.204l.527.738c.32.447.269 1.06-.12 1.45l-.774.773a1.125 1.125 0 01-1.449.12l-.738-.527c-.35-.25-.806-.272-1.203-.107-.397.165-.71.505-.781.929l-.149.894c-.09.542-.56.94-1.11.94h-1.094c-.55 0-1.019-.398-1.11-.94l-.148-.894c-.071-.424-.384-.764-.781-.93-.398-.164-.854-.142-1.204.108l-.738.527c-.447.32-1.06.269-1.45-.12l-.773-.774a1.125 1.125 0 01-.12-1.45l.527-.737c.25-.35.273-.806.108-1.204-.165-.397-.505-.71-.93-.78l-.894-.15c-.542-.09-.94-.56-.94-1.109v-1.094c0-.55.398-1.02.94-1.11l.894-.149c.424-.07.765-.383.93-.78.165-.398.143-.854-.107-1.204l-.527-.738a1.125 1.125 0 01.12-1.45l.773-.773a1.125 1.125 0 011.45-.12l.737.527c.35.25.807.272 1.204.107.397-.165.71-.505.78-.929l.15-.894z"
/>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M15 12a3 3 0 11-6 0 3 3 0 016 0z"
/>
</svg>
</button>
</Tooltip>
</div>
{/if}
</div>
{/each}
</div>
<div class="text-left mt-1.5 ml-1 text-xs text-gray-500">
<div class="text-left mt-0.5 ml-1 text-[0.7rem] text-gray-500">
<button on:click={saveDefaultModel}> {$i18n.t('Set as default')}</button>
</div>


@ -0,0 +1,389 @@
<script lang="ts">
import { Select } from 'bits-ui';
import { flyAndScale } from '$lib/utils/transitions';
import { createEventDispatcher, onMount, getContext, tick } from 'svelte';
import ChevronDown from '$lib/components/icons/ChevronDown.svelte';
import Check from '$lib/components/icons/Check.svelte';
import Search from '$lib/components/icons/Search.svelte';
import { cancelOllamaRequest, deleteModel, getOllamaVersion, pullModel } from '$lib/apis/ollama';
import { user, MODEL_DOWNLOAD_POOL, models } from '$lib/stores';
import { toast } from 'svelte-sonner';
import { capitalizeFirstLetter, getModels, splitStream } from '$lib/utils';
import Tooltip from '$lib/components/common/Tooltip.svelte';
const i18n = getContext('i18n');
const dispatch = createEventDispatcher();
export let value = '';
export let placeholder = 'Select a model';
export let searchEnabled = true;
export let searchPlaceholder = 'Search a model';
export let items = [{ value: 'mango', label: 'Mango' }];
let searchValue = '';
let ollamaVersion = null;
$: filteredItems = searchValue
? items.filter((item) => item.value.includes(searchValue.toLowerCase()))
: items;
const pullModelHandler = async () => {
const sanitizedModelTag = searchValue.trim();
console.log($MODEL_DOWNLOAD_POOL);
if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag]) {
toast.error(
$i18n.t(`Model '{{modelTag}}' is already in queue for downloading.`, {
modelTag: sanitizedModelTag
})
);
return;
}
if (Object.keys($MODEL_DOWNLOAD_POOL).length === 3) {
toast.error(
$i18n.t('Maximum of 3 models can be downloaded simultaneously. Please try again later.')
);
return;
}
const res = await pullModel(localStorage.token, sanitizedModelTag, '0').catch((error) => {
toast.error(error);
return null;
});
if (res) {
const reader = res.body
.pipeThrough(new TextDecoderStream())
.pipeThrough(splitStream('\n'))
.getReader();
while (true) {
try {
const { value, done } = await reader.read();
if (done) break;
let lines = value.split('\n');
for (const line of lines) {
if (line !== '') {
let data = JSON.parse(line);
console.log(data);
if (data.error) {
throw data.error;
}
if (data.detail) {
throw data.detail;
}
if (data.id) {
MODEL_DOWNLOAD_POOL.set({
...$MODEL_DOWNLOAD_POOL,
[sanitizedModelTag]: {
...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
requestId: data.id,
reader,
done: false
}
});
console.log(data);
}
if (data.status) {
if (data.digest) {
let downloadProgress = 0;
if (data.completed) {
downloadProgress = Math.round((data.completed / data.total) * 1000) / 10;
} else {
downloadProgress = 100;
}
MODEL_DOWNLOAD_POOL.set({
...$MODEL_DOWNLOAD_POOL,
[sanitizedModelTag]: {
...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
pullProgress: downloadProgress,
digest: data.digest
}
});
} else {
toast.success(data.status);
MODEL_DOWNLOAD_POOL.set({
...$MODEL_DOWNLOAD_POOL,
[sanitizedModelTag]: {
...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
done: data.status === 'success'
}
});
}
}
}
}
} catch (error) {
console.log(error);
if (typeof error !== 'string') {
error = error.message;
}
toast.error(error);
// opts.callback({ success: false, error, modelName: opts.modelName });
}
}
if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag].done) {
toast.success(
$i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, {
modelName: sanitizedModelTag
})
);
models.set(await getModels(localStorage.token));
} else {
toast.error('Download canceled');
}
delete $MODEL_DOWNLOAD_POOL[sanitizedModelTag];
MODEL_DOWNLOAD_POOL.set({
...$MODEL_DOWNLOAD_POOL
});
}
};
onMount(async () => {
ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
});
const cancelModelPullHandler = async (model: string) => {
const { reader, requestId } = $MODEL_DOWNLOAD_POOL[model];
if (reader) {
await reader.cancel();
await cancelOllamaRequest(localStorage.token, requestId);
delete $MODEL_DOWNLOAD_POOL[model];
MODEL_DOWNLOAD_POOL.set({
...$MODEL_DOWNLOAD_POOL
});
await deleteModel(localStorage.token, model);
toast.success(`${model} download has been canceled`);
}
};
</script>
<Select.Root
{items}
onOpenChange={async () => {
searchValue = '';
window.setTimeout(() => document.getElementById('model-search-input')?.focus(), 0);
}}
selected={items.find((item) => item.value === value) ?? ''}
onSelectedChange={(selectedItem) => {
value = selectedItem.value;
}}
>
<Select.Trigger class="relative w-full" aria-label={placeholder}>
<Select.Value
class="flex text-left px-0.5 outline-none bg-transparent truncate text-lg font-semibold placeholder-gray-400 focus:outline-none"
{placeholder}
/>
<ChevronDown className="absolute end-2 top-1/2 -translate-y-[45%] size-3.5" strokeWidth="2.5" />
</Select.Trigger>
<Select.Content
class=" z-40 w-full rounded-lg bg-white dark:bg-gray-900 dark:text-white shadow-lg border border-gray-300/30 dark:border-gray-700/50 outline-none"
transition={flyAndScale}
sideOffset={4}
>
<slot>
{#if searchEnabled}
<div class="flex items-center gap-2.5 px-5 mt-3.5 mb-3">
<Search className="size-4" strokeWidth="2.5" />
<input
id="model-search-input"
bind:value={searchValue}
class="w-full text-sm bg-transparent outline-none"
placeholder={searchPlaceholder}
/>
</div>
<hr class="border-gray-100 dark:border-gray-800" />
{/if}
<div class="px-3 my-2 max-h-72 overflow-y-auto">
{#each filteredItems as item}
<Select.Item
class="flex w-full font-medium line-clamp-1 select-none items-center rounded-button py-2 pl-3 pr-1.5 text-sm text-gray-700 dark:text-gray-100 outline-none transition-all duration-75 hover:bg-gray-100 dark:hover:bg-gray-850 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
value={item.value}
label={item.label}
>
<div class="flex items-center gap-2">
<div class="line-clamp-1">
{item.label}
<span class=" text-xs font-medium text-gray-600 dark:text-gray-400"
>{item.info?.details?.parameter_size ?? ''}</span
>
</div>
<!-- {JSON.stringify(item.info)} -->
{#if item.info.external}
<Tooltip content={item.info?.source ?? 'External'}>
<div class=" mr-2">
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="size-3"
>
<path
fill-rule="evenodd"
d="M8.914 6.025a.75.75 0 0 1 1.06 0 3.5 3.5 0 0 1 0 4.95l-2 2a3.5 3.5 0 0 1-5.396-4.402.75.75 0 0 1 1.251.827 2 2 0 0 0 3.085 2.514l2-2a2 2 0 0 0 0-2.828.75.75 0 0 1 0-1.06Z"
clip-rule="evenodd"
/>
<path
fill-rule="evenodd"
d="M7.086 9.975a.75.75 0 0 1-1.06 0 3.5 3.5 0 0 1 0-4.95l2-2a3.5 3.5 0 0 1 5.396 4.402.75.75 0 0 1-1.251-.827 2 2 0 0 0-3.085-2.514l-2 2a2 2 0 0 0 0 2.828.75.75 0 0 1 0 1.06Z"
clip-rule="evenodd"
/>
</svg>
</div>
</Tooltip>
{:else}
<Tooltip
content={`${
item.info?.details?.quantization_level
? item.info?.details?.quantization_level + ' '
: ''
}${item.info.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}
>
<div class=" mr-2">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="w-4 h-4"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="m11.25 11.25.041-.02a.75.75 0 0 1 1.063.852l-.708 2.836a.75.75 0 0 0 1.063.853l.041-.021M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Zm-9-3.75h.008v.008H12V8.25Z"
/>
</svg>
</div>
</Tooltip>
{/if}
</div>
{#if value === item.value}
<div class="ml-auto">
<Check />
</div>
{/if}
</Select.Item>
{:else}
<div>
<div class="block px-3 py-2 text-sm text-gray-700 dark:text-gray-100">
No results found
</div>
</div>
{/each}
{#if !(searchValue.trim() in $MODEL_DOWNLOAD_POOL) && searchValue && ollamaVersion && $user.role === 'admin'}
<button
class="flex w-full font-medium line-clamp-1 select-none items-center rounded-button py-2 pl-3 pr-1.5 text-sm text-gray-700 dark:text-gray-100 outline-none transition-all duration-75 hover:bg-gray-100 dark:hover:bg-gray-850 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
on:click={() => {
pullModelHandler();
}}
>
Pull "{searchValue}" from Ollama.com
</button>
{/if}
{#each Object.keys($MODEL_DOWNLOAD_POOL) as model}
<div
class="flex w-full justify-between font-medium select-none rounded-button py-2 pl-3 pr-1.5 text-sm text-gray-700 dark:text-gray-100 outline-none transition-all duration-75 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
>
<div class="flex">
<div class="-ml-2 mr-2.5 translate-y-0.5">
<svg
class="size-4"
viewBox="0 0 24 24"
fill="currentColor"
xmlns="http://www.w3.org/2000/svg"
><style>
.spinner_ajPY {
transform-origin: center;
animation: spinner_AtaB 0.75s infinite linear;
}
@keyframes spinner_AtaB {
100% {
transform: rotate(360deg);
}
}
</style><path
d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
opacity=".25"
/><path
d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
class="spinner_ajPY"
/></svg
>
</div>
<div class="flex flex-col self-start">
<div class="line-clamp-1">
Downloading "{model}" {'pullProgress' in $MODEL_DOWNLOAD_POOL[model]
? `(${$MODEL_DOWNLOAD_POOL[model].pullProgress}%)`
: ''}
</div>
{#if 'digest' in $MODEL_DOWNLOAD_POOL[model] && $MODEL_DOWNLOAD_POOL[model].digest}
<div class="-mt-1 h-fit text-[0.7rem] dark:text-gray-500 line-clamp-1">
{$MODEL_DOWNLOAD_POOL[model].digest}
</div>
{/if}
</div>
</div>
<div class="mr-2 translate-y-0.5">
<Tooltip content="Cancel">
<button
class="text-gray-800 dark:text-gray-100"
on:click={() => {
cancelModelPullHandler(model);
}}
>
<svg
class="w-4 h-4 text-gray-800 dark:text-white"
aria-hidden="true"
xmlns="http://www.w3.org/2000/svg"
width="24"
height="24"
fill="currentColor"
viewBox="0 0 24 24"
>
<path
stroke="currentColor"
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M6 18 17.94 6M18 18 6.06 6"
/>
</svg>
</button>
</Tooltip>
</div>
</div>
{/each}
</div>
</slot>
</Select.Content>
</Select.Root>


@ -14,7 +14,7 @@
export let getModels: Function;
// General
let themes = ['dark', 'light', 'rose-pine dark', 'rose-pine-dawn light'];
let themes = ['dark', 'light', 'rose-pine dark', 'rose-pine-dawn light', 'oled-dark'];
let selectedTheme = 'system';
let languages = [];
@ -91,12 +91,17 @@
});
const applyTheme = (_theme: string) => {
let themeToApply = _theme;
let themeToApply = _theme === 'oled-dark' ? 'dark' : _theme;
if (_theme === 'system') {
themeToApply = window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light';
}
if (themeToApply === 'dark' && !_theme.includes('oled')) {
document.documentElement.style.setProperty('--color-gray-900', '#171717');
document.documentElement.style.setProperty('--color-gray-950', '#0d0d0d');
}
themes
.filter((e) => e !== themeToApply)
.forEach((e) => {
@ -115,7 +120,11 @@
const themeChangeHandler = (_theme: string) => {
theme.set(_theme);
localStorage.setItem('theme', _theme);
if (_theme.includes('oled')) {
document.documentElement.style.setProperty('--color-gray-900', '#000000');
document.documentElement.style.setProperty('--color-gray-950', '#000000');
document.documentElement.classList.add('dark');
}
applyTheme(_theme);
};
</script>
@ -136,6 +145,7 @@
>
<option value="system">⚙️ {$i18n.t('System')}</option>
<option value="dark">🌑 {$i18n.t('Dark')}</option>
<option value="oled-dark">🌃 {$i18n.t('OLED Dark')}</option>
<option value="light">☀️ {$i18n.t('Light')}</option>
<option value="rose-pine dark">🪻 {$i18n.t('Rosé Pine')}</option>
<option value="rose-pine-dawn light">🌷 {$i18n.t('Rosé Pine Dawn')}</option>


@ -1,7 +1,7 @@
<script lang="ts">
import { getBackendConfig } from '$lib/apis';
import { setDefaultPromptSuggestions } from '$lib/apis/configs';
import { config, models, user } from '$lib/stores';
import { config, models, settings, user } from '$lib/stores';
import { createEventDispatcher, onMount, getContext } from 'svelte';
import { toast } from 'svelte-sonner';
const dispatch = createEventDispatcher();
@ -14,6 +14,7 @@
let titleAutoGenerate = true;
let responseAutoCopy = false;
let titleAutoGenerateModel = '';
let titleAutoGenerateModelExternal = '';
let fullScreenMode = false;
let titleGenerationPrompt = '';
@ -33,7 +34,12 @@
const toggleTitleAutoGenerate = async () => {
titleAutoGenerate = !titleAutoGenerate;
saveSettings({ titleAutoGenerate: titleAutoGenerate });
saveSettings({
title: {
...$settings.title,
auto: titleAutoGenerate
}
});
};
const toggleResponseAutoCopy = async () => {
@ -65,8 +71,13 @@
}
saveSettings({
titleAutoGenerateModel: titleAutoGenerateModel !== '' ? titleAutoGenerateModel : undefined,
titleGenerationPrompt: titleGenerationPrompt ? titleGenerationPrompt : undefined
title: {
...$settings.title,
model: titleAutoGenerateModel !== '' ? titleAutoGenerateModel : undefined,
modelExternal:
titleAutoGenerateModelExternal !== '' ? titleAutoGenerateModelExternal : undefined,
prompt: titleGenerationPrompt ? titleGenerationPrompt : undefined
}
});
};
@ -77,16 +88,18 @@
let settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
titleAutoGenerate = settings.titleAutoGenerate ?? true;
responseAutoCopy = settings.responseAutoCopy ?? false;
showUsername = settings.showUsername ?? false;
fullScreenMode = settings.fullScreenMode ?? false;
titleAutoGenerateModel = settings.titleAutoGenerateModel ?? '';
titleAutoGenerate = settings?.title?.auto ?? true;
titleAutoGenerateModel = settings?.title?.model ?? '';
titleAutoGenerateModelExternal = settings?.title?.modelExternal ?? '';
titleGenerationPrompt =
settings.titleGenerationPrompt ??
settings?.title?.prompt ??
$i18n.t(
"Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':"
) + ' {{prompt}}';
responseAutoCopy = settings.responseAutoCopy ?? false;
showUsername = settings.showUsername ?? false;
fullScreenMode = settings.fullScreenMode ?? false;
});
</script>
@ -190,8 +203,9 @@
<div>
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Title Auto-Generation Model')}</div>
<div class="flex w-full">
<div class="flex-1 mr-2">
<div class="flex w-full gap-2 pr-2">
<div class="flex-1">
<div class=" text-xs mb-1">Local Models</div>
<select
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
bind:value={titleAutoGenerateModel}
@ -207,6 +221,24 @@
{/each}
</select>
</div>
<div class="flex-1">
<div class=" text-xs mb-1">External Models</div>
<select
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
bind:value={titleAutoGenerateModelExternal}
placeholder={$i18n.t('Select a model')}
>
<option value="" selected>{$i18n.t('Current Model')}</option>
{#each $models as model}
{#if model.name !== 'hr'}
<option value={model.name} class="bg-gray-100 dark:bg-gray-700">
{model.name}
</option>
{/if}
{/each}
</select>
</div>
</div>
<div class="mt-3 mr-2">


@ -0,0 +1,20 @@
<script lang="ts">
import { getContext } from 'svelte';
import Modal from '../common/Modal.svelte';
import Tags from '../common/Tags.svelte';
const i18n = getContext('i18n');
export let tags;
export let deleteTag: Function;
export let addTag: Function;
export let show = false;
</script>
<Modal bind:show size="xs">
<div class="px-4 pt-4 pb-5 w-full flex flex-col justify-center">
<Tags {tags} {deleteTag} {addTag} />
</div>
</Modal>


@ -12,6 +12,9 @@
export let value = '';
export let placeholder = 'Select a model';
export let searchEnabled = true;
export let searchPlaceholder = 'Search a model';
export let items = [
{ value: 'mango', label: 'Mango' },
{ value: 'watermelon', label: 'Watermelon' },
@ -49,22 +52,25 @@
transition={flyAndScale}
sideOffset={4}
>
<slot>
{#if searchEnabled}
<div class="flex items-center gap-2.5 px-5 mt-3.5 mb-3">
<Search className="size-4" strokeWidth="2.5" />
<input
bind:value={searchValue}
class="w-full text-sm bg-transparent outline-none"
placeholder="Search a model"
placeholder={searchPlaceholder}
/>
</div>
<hr class="border-gray-100 dark:border-gray-800" />
{/if}
<div class="px-3 my-2 max-h-80 overflow-y-auto">
{#each filteredItems as item}
<Select.Item
class="flex w-full font-medium line-clamp-1 select-none items-center rounded-button py-2 pl-3 pr-1.5 text-sm text-gray-700 dark:text-gray-100 outline-none transition-all duration-75 data-[highlighted]:bg-muted"
class="flex w-full font-medium line-clamp-1 select-none items-center rounded-button py-2 pl-3 pr-1.5 text-sm text-gray-700 dark:text-gray-100 outline-none transition-all duration-75 hover:bg-gray-100 dark:hover:bg-gray-850 rounded-lg cursor-pointer data-[highlighted]:bg-muted"
value={item.value}
label={item.label}
>
@ -77,11 +83,13 @@
{/if}
</Select.Item>
{:else}
<span class="block px-5 py-2 text-sm text-gray-700 dark:text-gray-100">
<div>
<div class="block px-5 py-2 text-sm text-gray-700 dark:text-gray-100">
No results found
</span>
</div>
</div>
{/each}
</div>
</slot>
</Select.Content>
<Select.Input name="favoriteFruit" />
</Select.Root>


@ -8,7 +8,7 @@
export let addTag: Function;
</script>
<div class="flex flex-row space-x-0.5 line-clamp-1">
<div class="flex flex-row flex-wrap gap-0.5 line-clamp-1">
<TagList
{tags}
on:delete={(e) => {
@ -17,6 +17,7 @@
/>
<TagInput
label={tags.length == 0 ? 'Add Tags' : ''}
on:add={(e) => {
addTag(e.detail);
}}


@ -1,24 +1,31 @@
<script lang="ts">
import { createEventDispatcher, getContext } from 'svelte';
import { tags } from '$lib/stores';
import { toast } from 'svelte-sonner';
const dispatch = createEventDispatcher();
const i18n = getContext('i18n');
export let label = '';
let showTagInput = false;
let tagName = '';
const addTagHandler = async () => {
tagName = tagName.trim();
if (tagName !== '') {
dispatch('add', tagName);
tagName = '';
showTagInput = false;
} else {
toast.error('Invalid Tag');
}
};
</script>
<div class="flex space-x-1 pl-1.5">
{#if showTagInput}
<div class="flex items-center">
<button
type="button"
on:click={() => {
dispatch('add', tagName);
tagName = '';
showTagInput = false;
}}
>
<button type="button" on:click={addTagHandler}>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
@ -34,12 +41,21 @@
</button>
<input
bind:value={tagName}
class=" pl-2 cursor-pointer self-center text-xs h-fit bg-transparent outline-none line-clamp-1 w-[8rem]"
class=" pl-2 cursor-pointer self-center text-xs h-fit bg-transparent outline-none line-clamp-1 w-[5.5rem]"
placeholder={$i18n.t('Add a tag')}
list="tagOptions"
on:keydown={(event) => {
if (event.key === 'Enter') {
addTagHandler();
}
}}
/>
<datalist id="tagOptions">
{#each $tags as tag}
<option value={tag.name} />
{/each}
</datalist>
</div>
<!-- TODO: Tag Suggestions -->
{/if}
<button
@ -62,4 +78,8 @@
</svg>
</div>
</button>
{#if label && !showTagInput}
<span class="text-xs pl-1.5 self-center">{label}</span>
{/if}
</div>


@ -7,7 +7,7 @@
{#each tags as tag}
<div
class="px-2 py-0.5 space-x-1 flex h-fit items-center rounded-full transition border dark:border-gray-600 dark:text-white"
class="px-2 py-0.5 space-x-1 flex h-fit items-center rounded-full transition border dark:border-gray-800 dark:text-white"
>
<div class=" text-[0.7rem] font-medium self-center line-clamp-1">
{tag.name}


@ -29,6 +29,6 @@
});
</script>
<div bind:this={tooltipElement} aria-label={content}>
<div bind:this={tooltipElement} aria-label={content} class="flex">
<slot />
</div>


@ -0,0 +1,19 @@
<script lang="ts">
export let className = 'w-4 h-4';
export let strokeWidth = '1.5';
</script>
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width={strokeWidth}
stroke="currentColor"
class={className}
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M8.25 15 12 18.75 15.75 15m-7.5-6L12 5.25 15.75 9"
/>
</svg>


@ -0,0 +1,19 @@
<script lang="ts">
export let className = 'w-4 h-4';
export let strokeWidth = '1.5';
</script>
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width={strokeWidth}
stroke="currentColor"
class={className}
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M12 6.75a.75.75 0 1 1 0-1.5.75.75 0 0 1 0 1.5ZM12 12.75a.75.75 0 1 1 0-1.5.75.75 0 0 1 0 1.5ZM12 18.75a.75.75 0 1 1 0-1.5.75.75 0 0 1 0 1.5Z"
/>
</svg>


@ -4,11 +4,21 @@
import fileSaver from 'file-saver';
const { saveAs } = fileSaver;
import { Separator } from 'bits-ui';
import { getChatById } from '$lib/apis/chats';
import { WEBUI_NAME, chatId, modelfiles, settings } from '$lib/stores';
import { WEBUI_NAME, chatId, modelfiles, settings, showSettings } from '$lib/stores';
import { slide } from 'svelte/transition';
import ShareChatModal from '../chat/ShareChatModal.svelte';
import TagInput from '../common/Tags/TagInput.svelte';
import Tags from '../common/Tags.svelte';
import ModelSelector from '../chat/ModelSelector.svelte';
import Tooltip from '../common/Tooltip.svelte';
import EllipsisVertical from '../icons/EllipsisVertical.svelte';
import ChevronDown from '../icons/ChevronDown.svelte';
import ChevronUpDown from '../icons/ChevronUpDown.svelte';
import Menu from './Navbar/Menu.svelte';
import TagChatModal from '../chat/TagChatModal.svelte';
const i18n = getContext('i18n');
@ -16,14 +26,16 @@
export let title: string = $WEBUI_NAME;
export let shareEnabled: boolean = false;
export let selectedModels;
export let tags = [];
export let addTag: Function;
export let deleteTag: Function;
let showShareChatModal = false;
export let showModelSelector = false;
let tagName = '';
let showTagInput = false;
let showShareChatModal = false;
let showTagChatModal = false;
const shareChat = async () => {
const chat = (await getChatById(localStorage.token, $chatId)).chat;
@ -69,48 +81,27 @@
</script>
<ShareChatModal bind:show={showShareChatModal} {downloadChat} {shareChat} />
<!-- <TagChatModal bind:show={showTagChatModal} {tags} {deleteTag} {addTag} /> -->
<nav id="nav" class=" sticky py-2.5 top-0 flex flex-row justify-center z-30">
<div
class=" flex {$settings?.fullScreenMode ?? null
? 'max-w-full'
: 'max-w-3xl'} w-full mx-auto px-3"
class=" flex {$settings?.fullScreenMode ?? null ? 'max-w-full' : 'max-w-3xl'}
w-full mx-auto px-3"
>
<!-- {#if shareEnabled}
<div class="flex items-center w-full max-w-full">
<div class="pr-2 self-start">
<button
id="new-chat-button"
class=" cursor-pointer p-1.5 flex dark:hover:bg-gray-700 rounded-lg transition"
on:click={initNewChat}
>
<div class=" m-auto self-center">
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-5 h-5"
>
<path
d="M5.433 13.917l1.262-3.155A4 4 0 017.58 9.42l6.92-6.918a2.121 2.121 0 013 3l-6.92 6.918c-.383.383-.84.685-1.343.886l-3.154 1.262a.5.5 0 01-.65-.65z"
/>
<path
d="M3.5 5.75c0-.69.56-1.25 1.25-1.25H10A.75.75 0 0010 3H4.75A2.75 2.75 0 002 5.75v9.5A2.75 2.75 0 004.75 18h9.5A2.75 2.75 0 0017 15.25V10a.75.75 0 00-1.5 0v5.25c0 .69-.56 1.25-1.25 1.25h-9.5c-.69 0-1.25-.56-1.25-1.25v-9.5z"
/>
</svg>
</div>
</button>
</div>
<div class=" flex-1 self-center font-medium line-clamp-1">
<div>
{title != '' ? title : $WEBUI_NAME}
</div>
</div>
<div class="pl-2 self-center flex items-center space-x-2">
{#if shareEnabled}
<div class="pl-2 self-center flex items-center">
<div class=" mr-1">
<Tags {tags} {deleteTag} {addTag} />
</div>
<Tooltip content="Share">
<button
class=" cursor-pointer p-1.5 flex dark:hover:bg-gray-700 rounded-lg transition"
class="cursor-pointer p-1.5 flex dark:hover:bg-gray-700 rounded-full transition"
on:click={async () => {
showShareChatModal = !showShareChatModal;
@ -132,7 +123,112 @@
</svg>
</div>
</button>
</Tooltip>
</div>
</div>
{/if} -->
<!-- <div class=" flex-1 self-center font-medium line-clamp-1">
<div>
{title != '' ? title : $WEBUI_NAME}
</div>
</div> -->
<div class="flex items-center w-full max-w-full">
<div class="w-full flex-1 overflow-hidden max-w-full">
<ModelSelector bind:selectedModels />
</div>
<div class="self-start flex flex-none items-center">
<div class="flex self-center w-[1px] h-5 mx-2 bg-stone-700" />
{#if !shareEnabled}
<Tooltip content="Settings">
<button
class="cursor-pointer p-1.5 flex dark:hover:bg-gray-700 rounded-full transition"
id="open-settings-button"
on:click={async () => {
await showSettings.set(!$showSettings);
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="w-5 h-5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M9.594 3.94c.09-.542.56-.94 1.11-.94h2.593c.55 0 1.02.398 1.11.94l.213 1.281c.063.374.313.686.645.87.074.04.147.083.22.127.325.196.72.257 1.075.124l1.217-.456a1.125 1.125 0 0 1 1.37.49l1.296 2.247a1.125 1.125 0 0 1-.26 1.431l-1.003.827c-.293.241-.438.613-.43.992a7.723 7.723 0 0 1 0 .255c-.008.378.137.75.43.991l1.004.827c.424.35.534.955.26 1.43l-1.298 2.247a1.125 1.125 0 0 1-1.369.491l-1.217-.456c-.355-.133-.75-.072-1.076.124a6.47 6.47 0 0 1-.22.128c-.331.183-.581.495-.644.869l-.213 1.281c-.09.543-.56.94-1.11.94h-2.594c-.55 0-1.019-.398-1.11-.94l-.213-1.281c-.062-.374-.312-.686-.644-.87a6.52 6.52 0 0 1-.22-.127c-.325-.196-.72-.257-1.076-.124l-1.217.456a1.125 1.125 0 0 1-1.369-.49l-1.297-2.247a1.125 1.125 0 0 1 .26-1.431l1.004-.827c.292-.24.437-.613.43-.991a6.932 6.932 0 0 1 0-.255c.007-.38-.138-.751-.43-.992l-1.004-.827a1.125 1.125 0 0 1-.26-1.43l1.297-2.247a1.125 1.125 0 0 1 1.37-.491l1.216.456c.356.133.751.072 1.076-.124.072-.044.146-.086.22-.128.332-.183.582-.495.644-.869l.214-1.28Z"
/>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M15 12a3 3 0 1 1-6 0 3 3 0 0 1 6 0Z"
/>
</svg>
</button>
</Tooltip>
{:else}
<Menu
{shareEnabled}
shareHandler={() => {
showShareChatModal = !showShareChatModal;
}}
{tags}
{deleteTag}
{addTag}
>
<button
class="cursor-pointer p-1.5 flex dark:hover:bg-gray-700 rounded-full transition"
>
<div class=" m-auto self-center">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="size-5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M6.75 12a.75.75 0 1 1-1.5 0 .75.75 0 0 1 1.5 0ZM12.75 12a.75.75 0 1 1-1.5 0 .75.75 0 0 1 1.5 0ZM18.75 12a.75.75 0 1 1-1.5 0 .75.75 0 0 1 1.5 0Z"
/>
</svg>
</div>
</button>
</Menu>
{/if}
<Tooltip content="New Chat">
<button
id="new-chat-button"
class=" cursor-pointer p-1.5 flex dark:hover:bg-gray-700 rounded-full transition"
on:click={() => {
initNewChat();
}}
>
<div class=" m-auto self-center">
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-5 h-5"
>
<path
d="M5.433 13.917l1.262-3.155A4 4 0 017.58 9.42l6.92-6.918a2.121 2.121 0 013 3l-6.92 6.918c-.383.383-.84.685-1.343.886l-3.154 1.262a.5.5 0 01-.65-.65z"
/>
<path
d="M3.5 5.75c0-.69.56-1.25 1.25-1.25H10A.75.75 0 0010 3H4.75A2.75 2.75 0 002 5.75v9.5A2.75 2.75 0 004.75 18h9.5A2.75 2.75 0 0017 15.25V10a.75.75 0 00-1.5 0v5.25c0 .69-.56 1.25-1.25 1.25h-9.5c-.69 0-1.25-.56-1.25-1.25v-9.5z"
/>
</svg>
</div>
</button>
</Tooltip>
</div>
</div>
</div>


@ -0,0 +1,123 @@
<script lang="ts">
import { DropdownMenu } from 'bits-ui';
import { flyAndScale } from '$lib/utils/transitions';
import Dropdown from '$lib/components/common/Dropdown.svelte';
import GarbageBin from '$lib/components/icons/GarbageBin.svelte';
import Pencil from '$lib/components/icons/Pencil.svelte';
import Tooltip from '$lib/components/common/Tooltip.svelte';
import { showSettings } from '$lib/stores';
import Tags from '$lib/components/common/Tags.svelte';
export let shareEnabled: boolean = false;
export let shareHandler: Function;
// export let tagHandler: Function;
export let tags;
export let deleteTag: Function;
export let addTag: Function;
export let onClose: Function = () => {};
</script>
<Dropdown
on:change={(e) => {
if (e.detail === false) {
onClose();
}
}}
>
<slot />
<div slot="content">
<DropdownMenu.Content
class="w-full max-w-[150px] rounded-lg px-1 py-1.5 border border-gray-300/30 dark:border-gray-700/50 z-50 bg-white dark:bg-gray-850 dark:text-white shadow"
sideOffset={8}
side="bottom"
align="end"
transition={flyAndScale}
>
<DropdownMenu.Item
class="flex gap-2 items-center px-3 py-2 text-sm font-medium cursor-pointer"
on:click={async () => {
await showSettings.set(!$showSettings);
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="w-5 h-5"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M9.594 3.94c.09-.542.56-.94 1.11-.94h2.593c.55 0 1.02.398 1.11.94l.213 1.281c.063.374.313.686.645.87.074.04.147.083.22.127.325.196.72.257 1.075.124l1.217-.456a1.125 1.125 0 0 1 1.37.49l1.296 2.247a1.125 1.125 0 0 1-.26 1.431l-1.003.827c-.293.241-.438.613-.43.992a7.723 7.723 0 0 1 0 .255c-.008.378.137.75.43.991l1.004.827c.424.35.534.955.26 1.43l-1.298 2.247a1.125 1.125 0 0 1-1.369.491l-1.217-.456c-.355-.133-.75-.072-1.076.124a6.47 6.47 0 0 1-.22.128c-.331.183-.581.495-.644.869l-.213 1.281c-.09.543-.56.94-1.11.94h-2.594c-.55 0-1.019-.398-1.11-.94l-.213-1.281c-.062-.374-.312-.686-.644-.87a6.52 6.52 0 0 1-.22-.127c-.325-.196-.72-.257-1.076-.124l-1.217.456a1.125 1.125 0 0 1-1.369-.49l-1.297-2.247a1.125 1.125 0 0 1 .26-1.431l1.004-.827c.292-.24.437-.613.43-.991a6.932 6.932 0 0 1 0-.255c.007-.38-.138-.751-.43-.992l-1.004-.827a1.125 1.125 0 0 1-.26-1.43l1.297-2.247a1.125 1.125 0 0 1 1.37-.491l1.216.456c.356.133.751.072 1.076-.124.072-.044.146-.086.22-.128.332-.183.582-.495.644-.869l.214-1.28Z"
/>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M15 12a3 3 0 1 1-6 0 3 3 0 0 1 6 0Z"
/>
</svg>
<div class="flex items-center">Settings</div>
</DropdownMenu.Item>
{#if shareEnabled}
<DropdownMenu.Item
class="flex gap-2 items-center px-3 py-2 text-sm font-medium cursor-pointer"
on:click={() => {
shareHandler();
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 24 24"
fill="currentColor"
class="size-4"
>
<path
fill-rule="evenodd"
d="M15.75 4.5a3 3 0 1 1 .825 2.066l-8.421 4.679a3.002 3.002 0 0 1 0 1.51l8.421 4.679a3 3 0 1 1-.729 1.31l-8.421-4.678a3 3 0 1 1 0-4.132l8.421-4.679a3 3 0 0 1-.096-.755Z"
clip-rule="evenodd"
/>
</svg>
<div class="flex items-center">Share</div>
</DropdownMenu.Item>
<hr class="border-gray-100 dark:border-gray-800 my-1" />
<div class="flex p-1">
<Tags {tags} {deleteTag} {addTag} />
</div>
<!-- <DropdownMenu.Item
class="flex gap-2 items-center px-3 py-2 text-sm font-medium cursor-pointer"
on:click={() => {
tagHandler();
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="2"
stroke="currentColor"
class="size-4"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M9.568 3H5.25A2.25 2.25 0 0 0 3 5.25v4.318c0 .597.237 1.17.659 1.591l9.581 9.581c.699.699 1.78.872 2.607.33a18.095 18.095 0 0 0 5.223-5.223c.542-.827.369-1.908-.33-2.607L11.16 3.66A2.25 2.25 0 0 0 9.568 3Z"
/>
<path stroke-linecap="round" stroke-linejoin="round" d="M6 6h.008v.008H6V6Z" />
</svg>
<div class="flex items-center">Tag</div>
</DropdownMenu.Item> -->
{/if}
</DropdownMenu.Content>
</div>
</Dropdown>


@ -107,7 +107,7 @@
bind:this={navElement}
class="h-screen max-h-[100dvh] min-h-screen {show
? 'lg:relative w-[260px]'
: '-translate-x-[260px] w-[0px]'} bg-gray-50 text-gray-900 dark:bg-gray-950 dark:text-gray-200 text-sm transition z-40 fixed top-0 left-0
: '-translate-x-[260px] w-[0px]'} bg-gray-50 text-gray-900 dark:bg-gray-950 dark:text-gray-200 text-sm transition fixed z-50 top-0 left-0
"
>
<div
@ -706,7 +706,7 @@
</div>
<div
class="fixed left-0 top-[50dvh] z-40 -translate-y-1/2 transition-transform translate-x-[255px] md:translate-x-[260px] rotate-0"
class="fixed left-0 top-[50dvh] -translate-y-1/2 transition-transform translate-x-[255px] md:translate-x-[260px] rotate-0"
>
<Tooltip
placement="right"


@ -22,6 +22,7 @@ export const SUPPORTED_FILE_TYPE = [
'text/plain',
'text/csv',
'text/xml',
'text/html',
'text/x-python',
'text/css',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
@ -50,6 +51,8 @@ export const SUPPORTED_FILE_EXTENSIONS = [
'h',
'c',
'cs',
'htm',
'html',
'sql',
'log',
'ini',


@ -37,23 +37,28 @@ const createIsLoadingStore = (i18n: i18nType) => {
return isLoading;
};
export const initI18n = (defaultLocale: string) => {
let detectionOrder = defaultLocale
? ['querystring', 'localStorage']
: ['querystring', 'localStorage', 'navigator'];
let fallbackDefaultLocale = defaultLocale ? [defaultLocale] : ['en-US'];
const loadResource = (language: string, namespace: string) =>
import(`./locales/${language}/${namespace}.json`);
i18next
.use(
resourcesToBackend(
(language: string, namespace: string) => import(`./locales/${language}/${namespace}.json`)
)
)
.use(resourcesToBackend(loadResource))
.use(LanguageDetector)
.init({
debug: false,
detection: {
order: ['querystring', 'localStorage', 'navigator'],
order: detectionOrder,
caches: ['localStorage'],
lookupQuerystring: 'lang',
lookupLocalStorage: 'locale'
},
fallbackLng: {
default: ['en-US']
default: fallbackDefaultLocale
},
ns: 'translation',
returnEmptyString: false,
@ -61,6 +66,7 @@ i18next
escapeValue: false // not needed for svelte as it escapes by default
}
});
};
const i18n = createI18nStore(i18next);
const isLoadingStore = createIsLoadingStore(i18next);
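Note on the initI18n change above: when the backend supplies a default locale, 'navigator' is dropped from the detection order and that locale becomes the fallback, so the server-configured default wins unless the user overrides it via the ?lang= query string or localStorage. A minimal sketch of just that selection logic, assuming the same i18next setup; resolveI18nDefaults is an illustrative name, not part of the diff:

// Sketch only: reproduces the detection-order / fallback choice made by initI18n above.
const resolveI18nDefaults = (defaultLocale?: string) => {
  const order = defaultLocale
    ? ['querystring', 'localStorage']
    : ['querystring', 'localStorage', 'navigator'];
  const fallbackLng = defaultLocale ? [defaultLocale] : ['en-US'];
  return { order, fallbackLng };
};

// e.g. resolveI18nDefaults('ko-KR') => { order: ['querystring', 'localStorage'], fallbackLng: ['ko-KR'] }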

View file

@ -279,7 +279,7 @@
"Send a Message": "Изпращане на Съобщение",
"Send message": "Изпращане на съобщение",
"Server connection verified": "Server connection verified",
"Set as default": "Задай като по подразбиране",
"Set as default": "Задай по подразбиране",
"Set Default Model": "Задай Модел По Подразбиране",
"Set Image Size": "Задай Размер на Изображението",
"Set Steps": "Задай Стъпки",
@ -320,7 +320,7 @@
"Title": "Заглавие",
"Title Auto-Generation": "Автоматично Генериране на Заглавие",
"Title Generation Prompt": "Промпт за Генериране на Заглавие",
"to": "до",
"to": "в",
"To access the available model names for downloading,": "За да получите достъп до наличните имена на модели за изтегляне,",
"To access the GGUF models available for downloading,": "За да получите достъп до GGUF моделите, налични за изтегляне,",
"to chat input.": "към чат входа.",

View file

@ -1,128 +1,128 @@
{
"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.",
"(Beta)": "(Beta)",
"(e.g. `sh webui.sh --api`)": "(e.g. `sh webui.sh --api`)",
"(latest)": "(latest)",
"{{modelName}} is thinking...": "{{modelName}} is thinking...",
"{{webUIName}} Backend Required": "{{webUIName}} Backend Required",
"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "",
"(Beta)": "",
"(e.g. `sh webui.sh --api`)": "",
"(latest)": "",
"{{modelName}} is thinking...": "",
"{{webUIName}} Backend Required": "",
"a user": "",
"About": "About",
"Account": "Account",
"Action": "Action",
"Add a model": "Add a model",
"Add a model tag name": "Add a model tag name",
"Add a short description about what this modelfile does": "Add a short description about what this modelfile does",
"Add a short title for this prompt": "Add a short title for this prompt",
"Add a tag": "Add a tag",
"Add Docs": "Add Docs",
"Add Files": "Add Files",
"Add message": "Add message",
"add tags": "add tags",
"Adjusting these settings will apply changes universally to all users.": "Adjusting these settings will apply changes universally to all users.",
"admin": "admin",
"Admin Panel": "Admin Panel",
"Admin Settings": "Admin Settings",
"Advanced Parameters": "Advanced Parameters",
"all": "all",
"All Users": "All Users",
"Allow": "Allow",
"Allow Chat Deletion": "Allow Chat Deletion",
"alphanumeric characters and hyphens": "alphanumeric characters and hyphens",
"Already have an account?": "Already have an account?",
"About": "",
"Account": "",
"Action": "",
"Add a model": "",
"Add a model tag name": "",
"Add a short description about what this modelfile does": "",
"Add a short title for this prompt": "",
"Add a tag": "",
"Add Docs": "",
"Add Files": "",
"Add message": "",
"add tags": "",
"Adjusting these settings will apply changes universally to all users.": "",
"admin": "",
"Admin Panel": "",
"Admin Settings": "",
"Advanced Parameters": "",
"all": "",
"All Users": "",
"Allow": "",
"Allow Chat Deletion": "",
"alphanumeric characters and hyphens": "",
"Already have an account?": "",
"an assistant": "",
"and": "and",
"API Base URL": "API Base URL",
"API Key": "API Key",
"API RPM": "API RPM",
"are allowed - Activate this command by typing": "are allowed - Activate this command by typing",
"and": "",
"API Base URL": "",
"API Key": "",
"API RPM": "",
"are allowed - Activate this command by typing": "",
"Are you sure?": "",
"Audio": "Audio",
"Auto-playback response": "Auto-playback response",
"Auto-send input after 3 sec.": "Auto-send input after 3 sec.",
"AUTOMATIC1111 Base URL": "AUTOMATIC1111 Base URL",
"Audio": "",
"Auto-playback response": "",
"Auto-send input after 3 sec.": "",
"AUTOMATIC1111 Base URL": "",
"AUTOMATIC1111 Base URL is required.": "",
"available!": "available!",
"Back": "Back",
"Builder Mode": "Builder Mode",
"Cancel": "Cancel",
"Categories": "Categories",
"Change Password": "Change Password",
"Chat": "Chat",
"Chat History": "Chat History",
"Chat History is off for this browser.": "Chat History is off for this browser.",
"Chats": "Chats",
"Check Again": "Check Again",
"Check for updates": "Check for updates",
"Checking for updates...": "Checking for updates...",
"Choose a model before saving...": "Choose a model before saving...",
"Chunk Overlap": "Chunk Overlap",
"Chunk Params": "Chunk Params",
"Chunk Size": "Chunk Size",
"Click here for help.": "Click here for help.",
"Click here to check other modelfiles.": "Click here to check other modelfiles.",
"available!": "",
"Back": "",
"Builder Mode": "",
"Cancel": "",
"Categories": "",
"Change Password": "",
"Chat": "",
"Chat History": "",
"Chat History is off for this browser.": "",
"Chats": "",
"Check Again": "",
"Check for updates": "",
"Checking for updates...": "",
"Choose a model before saving...": "",
"Chunk Overlap": "",
"Chunk Params": "",
"Chunk Size": "",
"Click here for help.": "",
"Click here to check other modelfiles.": "",
"Click here to select": "",
"Click here to select documents.": "",
"click here.": "click here.",
"Click on the user role button to change a user's role.": "Click on the user role button to change a user's role.",
"Close": "Close",
"Collection": "Collection",
"Command": "Command",
"Confirm Password": "Confirm Password",
"Connections": "Connections",
"Content": "Content",
"Context Length": "Context Length",
"Conversation Mode": "Conversation Mode",
"Copy last code block": "Copy last code block",
"Copy last response": "Copy last response",
"Copying to clipboard was successful!": "Copying to clipboard was successful!",
"Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':": "Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':",
"Create a modelfile": "Create a modelfile",
"Create Account": "Create Account",
"Created at": "Created at",
"Created by": "Created by",
"Current Model": "Current Model",
"Current Password": "Current Password",
"Custom": "Custom",
"Customize Ollama models for a specific purpose": "Customize Ollama models for a specific purpose",
"Dark": "Dark",
"Database": "Database",
"DD/MM/YYYY HH:mm": "DD/MM/YYYY HH:mm",
"Default": "Default",
"click here.": "",
"Click on the user role button to change a user's role.": "",
"Close": "",
"Collection": "",
"Command": "",
"Confirm Password": "",
"Connections": "",
"Content": "",
"Context Length": "",
"Conversation Mode": "",
"Copy last code block": "",
"Copy last response": "",
"Copying to clipboard was successful!": "",
"Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':": "",
"Create a modelfile": "",
"Create Account": "",
"Created at": "",
"Created by": "",
"Current Model": "",
"Current Password": "",
"Custom": "",
"Customize Ollama models for a specific purpose": "",
"Dark": "",
"Database": "",
"DD/MM/YYYY HH:mm": "",
"Default": "",
"Default (Automatic1111)": "",
"Default (Web API)": "Default (Web API)",
"Default model updated": "Default model updated",
"Default Prompt Suggestions": "Default Prompt Suggestions",
"Default User Role": "Default User Role",
"delete": "delete",
"Delete a model": "Delete a model",
"Delete chat": "Delete chat",
"Delete Chats": "Delete Chats",
"Deleted {{deleteModelTag}}": "Deleted {{deleteModelTag}}",
"Deleted {tagName}": "Deleted {tagName}",
"Description": "Description",
"Desktop Notifications": "Notification",
"Disabled": "Disabled",
"Discover a modelfile": "Discover a modelfile",
"Discover a prompt": "Discover a prompt",
"Discover, download, and explore custom prompts": "Discover, download, and explore custom prompts",
"Discover, download, and explore model presets": "Discover, download, and explore model presets",
"Display the username instead of You in the Chat": "Display the username instead of 'You' in the Chat",
"Document": "Document",
"Document Settings": "Document Settings",
"Documents": "Documents",
"does not make any external connections, and your data stays securely on your locally hosted server.": "does not make any external connections, and your data stays securely on your locally hosted server.",
"Don't Allow": "Don't Allow",
"Don't have an account?": "Don't have an account?",
"Download as a File": "Download as a File",
"Download Database": "Download Database",
"Drop any files here to add to the conversation": "Drop any files here to add to the conversation",
"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.",
"Edit Doc": "Edit Doc",
"Edit User": "Edit User",
"Email": "Email",
"Enable Chat History": "Enable Chat History",
"Enable New Sign Ups": "Enable New Sign Ups",
"Enabled": "Enabled",
"Default (Web API)": "",
"Default model updated": "",
"Default Prompt Suggestions": "",
"Default User Role": "",
"delete": "",
"Delete a model": "",
"Delete chat": "",
"Delete Chats": "",
"Deleted {{deleteModelTag}}": "",
"Deleted {tagName}": "",
"Description": "",
"Desktop Notifications": "",
"Disabled": "",
"Discover a modelfile": "",
"Discover a prompt": "",
"Discover, download, and explore custom prompts": "",
"Discover, download, and explore model presets": "",
"Display the username instead of You in the Chat": "",
"Document": "",
"Document Settings": "",
"Documents": "",
"does not make any external connections, and your data stays securely on your locally hosted server.": "",
"Don't Allow": "",
"Don't have an account?": "",
"Download as a File": "",
"Download Database": "",
"Drop any files here to add to the conversation": "",
"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "",
"Edit Doc": "",
"Edit User": "",
"Email": "",
"Enable Chat History": "",
"Enable New Sign Ups": "",
"Enabled": "",
"Enter {{role}} message here": "",
"Enter API Key": "",
"Enter Chunk Overlap": "",
@ -135,229 +135,229 @@
"Enter Max Tokens (litellm_params.max_tokens)": "",
"Enter model tag (e.g. {{modelTag}})": "",
"Enter Number of Steps (e.g. 50)": "",
"Enter stop sequence": "Enter stop sequence",
"Enter stop sequence": "",
"Enter Top K": "",
"Enter URL (e.g. http://127.0.0.1:7860/)": "",
"Enter Your Email": "Enter Your Email",
"Enter Your Full Name": "Enter Your Full Name",
"Enter Your Password": "Enter Your Password",
"Experimental": "Experimental",
"Export All Chats (All Users)": "Export All Chats (All Users)",
"Export Chats": "Export Chats",
"Export Documents Mapping": "Export Documents Mapping",
"Export Modelfiles": "Export Modelfiles",
"Export Prompts": "Export Prompts",
"Failed to read clipboard contents": "Failed to read clipboard contents",
"File Mode": "File Mode",
"File not found.": "File not found.",
"Focus chat input": "Focus chat input",
"Format your variables using square brackets like this:": "Format your variables using square brackets like this:",
"From (Base Model)": "From (Base Model)",
"Full Screen Mode": "Full Screen Mode",
"General": "General",
"General Settings": "General Settings",
"Hello, {{name}}": "Hello, {{name}}",
"Hide": "Hide",
"Hide Additional Params": "Hide Additional Params",
"How can I help you today?": "How can I help you today?",
"Image Generation (Experimental)": "Image Generation (Experimental)",
"Enter Your Email": "",
"Enter Your Full Name": "",
"Enter Your Password": "",
"Experimental": "",
"Export All Chats (All Users)": "",
"Export Chats": "",
"Export Documents Mapping": "",
"Export Modelfiles": "",
"Export Prompts": "",
"Failed to read clipboard contents": "",
"File Mode": "",
"File not found.": "",
"Focus chat input": "",
"Format your variables using square brackets like this:": "",
"From (Base Model)": "",
"Full Screen Mode": "",
"General": "",
"General Settings": "",
"Hello, {{name}}": "",
"Hide": "",
"Hide Additional Params": "",
"How can I help you today?": "",
"Image Generation (Experimental)": "",
"Image Generation Engine": "",
"Image Settings": "Image Settings",
"Images": "Images",
"Import Chats": "Import Chats",
"Import Documents Mapping": "Import Documents Mapping",
"Import Modelfiles": "Import Modelfiles",
"Import Prompts": "Import Prompts",
"Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
"Interface": "Interface",
"join our Discord for help.": "join our Discord for help.",
"JSON": "JSON",
"JWT Expiration": "JWT Expiration",
"JWT Token": "JWT Token",
"Keep Alive": "Keep Alive",
"Keyboard shortcuts": "Keyboard shortcuts",
"Language": "Language",
"Light": "Light",
"Listening...": "Listening...",
"LLMs can make mistakes. Verify important information.": "LLMs can make mistakes. Verify important information.",
"Made by OpenWebUI Community": "Made by OpenWebUI Community",
"Make sure to enclose them with": "Make sure to enclose them with",
"Manage LiteLLM Models": "Manage LiteLLM Models",
"Manage Models": "Manage Models",
"Manage Ollama Models": "Manage Ollama Models",
"Max Tokens": "Max Tokens",
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maximum of 3 models can be downloaded simultaneously. Please try again later.",
"Mirostat": "Mirostat",
"Mirostat Eta": "Mirostat Eta",
"Mirostat Tau": "Mirostat Tau",
"MMMM DD, YYYY": "MMMM DD, YYYY",
"Model '{{modelName}}' has been successfully downloaded.": "Model '{{modelName}}' has been successfully downloaded.",
"Model '{{modelTag}}' is already in queue for downloading.": "Model '{{modelTag}}' is already in queue for downloading.",
"Model {{modelId}} not found": "Model {{modelId}} not found",
"Model {{modelName}} already exists.": "Model {{modelName}} already exists.",
"Model Name": "Model Name",
"Model not selected": "Model not selected",
"Model Tag Name": "Model Tag Name",
"Model Whitelisting": "Model Whitelisting",
"Model(s) Whitelisted": "Model(s) Whitelisted",
"Modelfile": "Modelfile",
"Modelfile Advanced Settings": "Modelfile Advanced Settings",
"Modelfile Content": "Modelfile Content",
"Modelfiles": "Modelfiles",
"Models": "Models",
"My Documents": "My Documents",
"My Modelfiles": "My Modelfiles",
"My Prompts": "My Prompts",
"Name": "Name",
"Name Tag": "Name Tag",
"Name your modelfile": "Name your modelfile",
"New Chat": "New Chat",
"New Password": "New Password",
"Not sure what to add?": "Not sure what to add?",
"Not sure what to write? Switch to": "Not sure what to write? Switch to",
"Off": "Off",
"Okay, Let's Go!": "Okay, Let's Go!",
"Image Settings": "",
"Images": "",
"Import Chats": "",
"Import Documents Mapping": "",
"Import Modelfiles": "",
"Import Prompts": "",
"Include `--api` flag when running stable-diffusion-webui": "",
"Interface": "",
"join our Discord for help.": "",
"JSON": "",
"JWT Expiration": "",
"JWT Token": "",
"Keep Alive": "",
"Keyboard shortcuts": "",
"Language": "",
"Light": "",
"Listening...": "",
"LLMs can make mistakes. Verify important information.": "",
"Made by OpenWebUI Community": "",
"Make sure to enclose them with": "",
"Manage LiteLLM Models": "",
"Manage Models": "",
"Manage Ollama Models": "",
"Max Tokens": "",
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "",
"Mirostat": "",
"Mirostat Eta": "",
"Mirostat Tau": "",
"MMMM DD, YYYY": "",
"Model '{{modelName}}' has been successfully downloaded.": "",
"Model '{{modelTag}}' is already in queue for downloading.": "",
"Model {{modelId}} not found": "",
"Model {{modelName}} already exists.": "",
"Model Name": "",
"Model not selected": "",
"Model Tag Name": "",
"Model Whitelisting": "",
"Model(s) Whitelisted": "",
"Modelfile": "",
"Modelfile Advanced Settings": "",
"Modelfile Content": "",
"Modelfiles": "",
"Models": "",
"My Documents": "",
"My Modelfiles": "",
"My Prompts": "",
"Name": "",
"Name Tag": "",
"Name your modelfile": "",
"New Chat": "",
"New Password": "",
"Not sure what to add?": "",
"Not sure what to write? Switch to": "",
"Off": "",
"Okay, Let's Go!": "",
"Ollama Base URL": "",
"Ollama Version": "Ollama Version",
"On": "On",
"Only": "Only",
"Only alphanumeric characters and hyphens are allowed in the command string.": "Only alphanumeric characters and hyphens are allowed in the command string.",
"Oops! Hold tight! Your files are still in the processing oven. We're cooking them up to perfection. Please be patient and we'll let you know once they're ready.": "Oops! Hold tight! Your files are still in the processing oven. We're cooking them up to perfection. Please be patient and we'll let you know once they're ready.",
"Oops! Looks like the URL is invalid. Please double-check and try again.": "Oops! Looks like the URL is invalid. Please double-check and try again.",
"Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.",
"Open": "Open",
"Open AI": "Open AI",
"Ollama Version": "",
"On": "",
"Only": "",
"Only alphanumeric characters and hyphens are allowed in the command string.": "",
"Oops! Hold tight! Your files are still in the processing oven. We're cooking them up to perfection. Please be patient and we'll let you know once they're ready.": "",
"Oops! Looks like the URL is invalid. Please double-check and try again.": "",
"Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "",
"Open": "",
"Open AI": "",
"Open AI (Dall-E)": "",
"Open new chat": "Open new chat",
"OpenAI API": "OpenAI API",
"Open new chat": "",
"OpenAI API": "",
"OpenAI API Key": "",
"OpenAI API Key is required.": "",
"or": "or",
"Parameters": "Parameters",
"Password": "Password",
"PDF Extract Images (OCR)": "PDF Extract Images (OCR)",
"pending": "pending",
"Permission denied when accessing microphone: {{error}}": "Permission denied when accessing microphone: {{error}}",
"Playground": "Playground",
"Profile": "Profile",
"Prompt Content": "Prompt Content",
"Prompt suggestions": "Prompt suggestions",
"Prompts": "Prompts",
"Pull a model from Ollama.com": "Pull a model from Ollama.com",
"Pull Progress": "Pull Progress",
"Query Params": "Query Params",
"RAG Template": "RAG Template",
"Raw Format": "Raw Format",
"Record voice": "Record voice",
"Redirecting you to OpenWebUI Community": "Redirecting you to OpenWebUI Community",
"Release Notes": "Release Notes",
"Repeat Last N": "Repeat Last N",
"Repeat Penalty": "Repeat Penalty",
"Request Mode": "Request Mode",
"Reset Vector Storage": "Reset Vector Storage",
"Response AutoCopy to Clipboard": "Response AutoCopy to Clipboard",
"Role": "Role",
"Rosé Pine": "Rosé Pine",
"Rosé Pine Dawn": "Rosé Pine Dawn",
"Save": "Save",
"Save & Create": "Save & Create",
"Save & Submit": "Save & Submit",
"Save & Update": "Save & Update",
"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through",
"Scan": "Scan",
"Scan complete!": "Scan complete!",
"Scan for documents from {{path}}": "Scan for documents from {{path}}",
"Search": "Search",
"Search Documents": "Search Documents",
"Search Prompts": "Search Prompts",
"See readme.md for instructions": "See readme.md for instructions",
"See what's new": "See what's new",
"Seed": "Seed",
"or": "",
"Parameters": "",
"Password": "",
"PDF Extract Images (OCR)": "",
"pending": "",
"Permission denied when accessing microphone: {{error}}": "",
"Playground": "",
"Profile": "",
"Prompt Content": "",
"Prompt suggestions": "",
"Prompts": "",
"Pull a model from Ollama.com": "",
"Pull Progress": "",
"Query Params": "",
"RAG Template": "",
"Raw Format": "",
"Record voice": "",
"Redirecting you to OpenWebUI Community": "",
"Release Notes": "",
"Repeat Last N": "",
"Repeat Penalty": "",
"Request Mode": "",
"Reset Vector Storage": "",
"Response AutoCopy to Clipboard": "",
"Role": "",
"Rosé Pine": "",
"Rosé Pine Dawn": "",
"Save": "",
"Save & Create": "",
"Save & Submit": "",
"Save & Update": "",
"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "",
"Scan": "",
"Scan complete!": "",
"Scan for documents from {{path}}": "",
"Search": "",
"Search Documents": "",
"Search Prompts": "",
"See readme.md for instructions": "",
"See what's new": "",
"Seed": "",
"Select a mode": "",
"Select a model": "Select a model",
"Select a model": "",
"Select an Ollama instance": "",
"Send a Message": "Send a Message",
"Send message": "Send message",
"Server connection verified": "Server connection verified",
"Set as default": "Set as default",
"Set Default Model": "Set Default Model",
"Set Image Size": "Set Image Size",
"Set Steps": "Set Steps",
"Set Title Auto-Generation Model": "Set Title Auto-Generation Model",
"Set Voice": "Set Voice",
"Settings": "Settings",
"Settings saved successfully!": "Settings saved successfully!",
"Share to OpenWebUI Community": "Share to OpenWebUI Community",
"short-summary": "short-summary",
"Show": "Show",
"Show Additional Params": "Show Additional Params",
"Show shortcuts": "Show shortcuts",
"sidebar": "sidebar",
"Sign in": "Sign in",
"Sign Out": "Sign Out",
"Sign up": "Sign up",
"Speech recognition error: {{error}}": "Speech recognition error: {{error}}",
"Speech-to-Text Engine": "Speech-to-Text Engine",
"SpeechRecognition API is not supported in this browser.": "SpeechRecognition API is not supported in this browser.",
"Stop Sequence": "Stop Sequence",
"STT Settings": "STT Settings",
"Submit": "Submit",
"Success": "Success",
"Successfully updated.": "Successfully updated.",
"Sync All": "Sync All",
"System": "System",
"System Prompt": "System Prompt",
"Tags": "Tags",
"Temperature": "Temperature",
"Template": "Template",
"Text Completion": "Text Completion",
"Text-to-Speech Engine": "Text-to-Speech Engine",
"Tfs Z": "Tfs Z",
"Theme": "Theme",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you!",
"This setting does not sync across browsers or devices.": "This setting does not sync across browsers or devices.",
"Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.",
"Title": "Title",
"Title Auto-Generation": "Title Auto-Generation",
"Title Generation Prompt": "Title Generation Prompt",
"to": "to",
"To access the available model names for downloading,": "To access the available model names for downloading,",
"To access the GGUF models available for downloading,": "To access the GGUF models available for downloading,",
"to chat input.": "to chat input.",
"Toggle settings": "Toggle settings",
"Toggle sidebar": "Toggle sidebar",
"Top K": "Top K",
"Top P": "Top P",
"Trouble accessing Ollama?": "Trouble accessing Ollama?",
"TTS Settings": "TTS Settings",
"Send a Message": "",
"Send message": "",
"Server connection verified": "",
"Set as default": "",
"Set Default Model": "",
"Set Image Size": "",
"Set Steps": "",
"Set Title Auto-Generation Model": "",
"Set Voice": "",
"Settings": "",
"Settings saved successfully!": "",
"Share to OpenWebUI Community": "",
"short-summary": "",
"Show": "",
"Show Additional Params": "",
"Show shortcuts": "",
"sidebar": "",
"Sign in": "",
"Sign Out": "",
"Sign up": "",
"Speech recognition error: {{error}}": "",
"Speech-to-Text Engine": "",
"SpeechRecognition API is not supported in this browser.": "",
"Stop Sequence": "",
"STT Settings": "",
"Submit": "",
"Success": "",
"Successfully updated.": "",
"Sync All": "",
"System": "",
"System Prompt": "",
"Tags": "",
"Temperature": "",
"Template": "",
"Text Completion": "",
"Text-to-Speech Engine": "",
"Tfs Z": "",
"Theme": "",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
"This setting does not sync across browsers or devices.": "",
"Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "",
"Title": "",
"Title Auto-Generation": "",
"Title Generation Prompt": "",
"to": "",
"To access the available model names for downloading,": "",
"To access the GGUF models available for downloading,": "",
"to chat input.": "",
"Toggle settings": "",
"Toggle sidebar": "",
"Top K": "",
"Top P": "",
"Trouble accessing Ollama?": "",
"TTS Settings": "",
"Type Hugging Face Resolve (Download) URL": "",
"Uh-oh! There was an issue connecting to {{provider}}.": "Uh-oh! There was an issue connecting to {{provider}}.",
"Unknown File Type '{{file_type}}', but accepting and treating as plain text": "Unknown File Type '{{file_type}}', but accepting and treating as plain text",
"Update password": "Update password",
"Upload a GGUF model": "Upload a GGUF model",
"Upload files": "Upload files",
"Upload Progress": "Upload Progress",
"URL Mode": "URL Mode",
"Use '#' in the prompt input to load and select your documents.": "Use '#' in the prompt input to load and select your documents.",
"Uh-oh! There was an issue connecting to {{provider}}.": "",
"Unknown File Type '{{file_type}}', but accepting and treating as plain text": "",
"Update password": "",
"Upload a GGUF model": "",
"Upload files": "",
"Upload Progress": "",
"URL Mode": "",
"Use '#' in the prompt input to load and select your documents.": "",
"Use Gravatar": "",
"user": "user",
"User Permissions": "User Permissions",
"Users": "Users",
"Utilize": "Utilize",
"Valid time units:": "Valid time units:",
"variable": "variable",
"variable to have them replaced with clipboard content.": "variable to have them replaced with clipboard content.",
"Version": "Version",
"Web": "Web",
"WebUI Add-ons": "WebUI Add-ons",
"WebUI Settings": "WebUI Settings",
"WebUI will make requests to": "WebUI will make requests to",
"Whats New in": "Whats New in",
"When history is turned off, new chats on this browser won't appear in your history on any of your devices.": "When history is turned off, new chats on this browser won't appear in your history on any of your devices.",
"Whisper (Local)": "Whisper (Local)",
"Write a prompt suggestion (e.g. Who are you?)": "Write a prompt suggestion (e.g. Who are you?)",
"Write a summary in 50 words that summarizes [topic or keyword].": "Write a summary in 50 words that summarizes [topic or keyword].",
"You": "You",
"You're a helpful assistant.": "You're a helpful assistant.",
"You're now logged in.": "You're now logged in."
"user": "",
"User Permissions": "",
"Users": "",
"Utilize": "",
"Valid time units:": "",
"variable": "",
"variable to have them replaced with clipboard content.": "",
"Version": "",
"Web": "",
"WebUI Add-ons": "",
"WebUI Settings": "",
"WebUI will make requests to": "",
"Whats New in": "",
"When history is turned off, new chats on this browser won't appear in your history on any of your devices.": "",
"Whisper (Local)": "",
"Write a prompt suggestion (e.g. Who are you?)": "",
"Write a summary in 50 words that summarizes [topic or keyword].": "",
"You": "",
"You're a helpful assistant.": "",
"You're now logged in.": ""
}

View file

@ -0,0 +1,362 @@
{
"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'초', '분', '시간', '일', '주' 또는 만료 없음 '-1'",
"(Beta)": "(Beta)",
"(e.g. `sh webui.sh --api`)": "(예: `sh webui.sh --api`)",
"(latest)": "(latest)",
"{{modelName}} is thinking...": "{{modelName}} 이(가) 생각중입니다....",
"{{webUIName}} Backend Required": "{{webUIName}} 백엔드가 필요합니다.",
"a user": "사용자",
"About": "소개",
"Account": "계정",
"Action": "액션",
"Add a model": "모델 추가",
"Add a model tag name": "모델 태그명 추가",
"Add a short description about what this modelfile does": "이 모델파일이 하는 일에 대한 간단한 설명 추가",
"Add a short title for this prompt": "이 프롬프트에 대한 간단한 제목 추가",
"Add a tag": "태그 추가",
"Add Docs": "문서 추가",
"Add Files": "파일 추가",
"Add message": "메시지 추가",
"add tags": "태그들 추가",
"Adjusting these settings will apply changes universally to all users.": "이 설정을 조정하면 모든 사용자에게 적용됩니다.",
"admin": "관리자",
"Admin Panel": "관리자 패널",
"Admin Settings": "관리자 설정",
"Advanced Parameters": "고급 매개변수",
"all": "모두",
"All Users": "모든 사용자",
"Allow": "허용",
"Allow Chat Deletion": "채팅 삭제 허용",
"alphanumeric characters and hyphens": "영문자,숫자 및 하이픈",
"Already have an account?": "이미 계정이 있으신가요?",
"an assistant": "어시스턴트",
"and": "그리고",
"API Base URL": "API 기본 URL",
"API Key": "API키",
"API RPM": "API RPM",
"are allowed - Activate this command by typing": "허용됩니다 - 이 명령을 활성화하려면 입력하세요.",
"Are you sure?": "확실합니까?",
"Audio": "오디오",
"Auto-playback response": "응답 자동 재생",
"Auto-send input after 3 sec.": "3초 후 입력 자동 전송",
"AUTOMATIC1111 Base URL": "AUTOMATIC1111 Base URL",
"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 Base URL이 필요합니다.",
"available!": "사용 가능!",
"Back": "뒤로가기",
"Builder Mode": "빌더 모드",
"Cancel": "취소",
"Categories": "분류",
"Change Password": "비밀번호 변경",
"Chat": "채팅",
"Chat History": "채팅 기록",
"Chat History is off for this browser.": "이 브라우저에서 채팅 기록이 꺼져 있습니다.",
"Chats": "채팅",
"Check Again": "다시 확인",
"Check for updates": "업데이트 확인",
"Checking for updates...": "업데이트 확인중...",
"Choose a model before saving...": "저장하기 전에 모델을 선택하세요...",
"Chunk Overlap": "Chunk Overlap",
"Chunk Params": "Chunk Params",
"Chunk Size": "Chunk Size",
"Click here for help.": "도움말을 보려면 여기를 클릭하세요.",
"Click here to check other modelfiles.": "다른 모델파일을 확인하려면 여기를 클릭하세요.",
"Click here to select": "선택하려면 여기를 클릭하세요.",
"Click here to select documents.": "문서를 선택하려면 여기를 클릭하세요.",
"click here.": "여기를 클릭하세요.",
"Click on the user role button to change a user's role.": "사용자 역할 버튼을 클릭하여 사용자의 역할을 변경하세요.",
"Close": "닫기",
"Collection": "컬렉션",
"Command": "명령",
"Confirm Password": "비밀번호 확인",
"Connections": "연결",
"Content": "내용",
"Context Length": "내용 길이",
"Conversation Mode": "대화 모드",
"Copy last code block": "마지막 코드 블록 복사",
"Copy last response": "마지막 응답 복사",
"Copying to clipboard was successful!": "클립보드에 복사되었습니다!",
"Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':": "다음 질문에 대한 제목으로 간결한 3-5 단어 문구를 만드되 3-5 단어 제한을 엄격히 준수하고 'title' 단어 사용을 피하세요:",
"Create a modelfile": "모델파일 만들기",
"Create Account": "계정 만들기",
"Created at": "생성일",
"Created by": "생성자",
"Current Model": "현재 모델",
"Current Password": "현재 비밀번호",
"Custom": "사용자 정의",
"Customize Ollama models for a specific purpose": "특정 목적으로 Ollama 모델 사용자 정의",
"Dark": "어두운",
"Database": "데이터베이스",
"DD/MM/YYYY HH:mm": "DD/MM/YYYY HH:mm",
"Default": "기본값",
"Default (Automatic1111)": "기본값 (Automatic1111)",
"Default (Web API)": "기본값 (Web API)",
"Default model updated": "기본 모델이 업데이트되었습니다.",
"Default Prompt Suggestions": "기본 프롬프트 제안",
"Default User Role": "기본 사용자 역할",
"delete": "삭제",
"Delete a model": "모델 삭제",
"Delete chat": "채팅 삭제",
"Delete Chats": "채팅들 삭제",
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} 삭제됨",
"Deleted {tagName}": "{tagName} 삭제됨",
"Description": "설명",
"Desktop Notifications": "알림",
"Disabled": "비활성화",
"Discover a modelfile": "모델파일 검색",
"Discover a prompt": "프롬프트 검색",
"Discover, download, and explore custom prompts": "사용자 정의 프롬프트 검색, 다운로드 및 탐색",
"Discover, download, and explore model presets": "모델 사전 설정 검색, 다운로드 및 탐색",
"Display the username instead of You in the Chat": "채팅에서 'You' 대신 사용자 이름 표시",
"Document": "문서",
"Document Settings": "문서 설정",
"Documents": "문서들",
"does not make any external connections, and your data stays securely on your locally hosted server.": "어떠한 외부 연결도 하지 않으며, 데이터는 로컬에서 호스팅되는 서버에 안전하게 유지됩니다.",
"Don't Allow": "허용 안 함",
"Don't have an account?": "계정이 없으신가요?",
"Download as a File": "파일로 다운로드",
"Download Database": "데이터베이스 다운로드",
"Drop any files here to add to the conversation": "대화에 추가할 파일을 여기에 드롭하세요.",
"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "예: '30s','10m'. 유효한 시간 단위는 's', 'm', 'h'입니다.",
"Edit Doc": "문서 편집",
"Edit User": "사용자 편집",
"Email": "이메일",
"Enable Chat History": "채팅 기록 활성화",
"Enable New Sign Ups": "새 회원가입 활성화",
"Enabled": "활성화",
"Enter {{role}} message here": "여기에 {{role}} 메시지 입력",
"Enter API Key": "API 키 입력",
"Enter Chunk Overlap": "청크 오버랩 입력",
"Enter Chunk Size": "청크 크기 입력",
"Enter Image Size (e.g. 512x512)": "이미지 크기 입력(예: 512x512)",
"Enter LiteLLM API Base URL (litellm_params.api_base)": "LiteLLM API 기본 URL 입력(litellm_params.api_base)",
"Enter LiteLLM API Key (litellm_params.api_key)": "LiteLLM API 키 입력(litellm_params.api_key)",
"Enter LiteLLM API RPM (litellm_params.rpm)": "LiteLLM API RPM 입력(litellm_params.rpm)",
"Enter LiteLLM Model (litellm_params.model)": "LiteLLM 모델 입력(litellm_params.model)",
"Enter Max Tokens (litellm_params.max_tokens)": "최대 토큰 수 입력(litellm_params.max_tokens)",
"Enter model tag (e.g. {{modelTag}})": "모델 태그 입력(예: {{modelTag}})",
"Enter Number of Steps (e.g. 50)": "단계 수 입력(예: 50)",
"Enter stop sequence": "중지 시퀀스 입력",
"Enter Top K": "Top K 입력",
"Enter URL (e.g. http://127.0.0.1:7860/)": "URL 입력(예: http://127.0.0.1:7860/)",
"Enter Your Email": "이메일 입력",
"Enter Your Full Name": "전체 이름 입력",
"Enter Your Password": "비밀번호 입력",
"Experimental": "실험적",
"Export All Chats (All Users)": "모든 채팅 내보내기 (모든 사용자)",
"Export Chats": "채팅 내보내기",
"Export Documents Mapping": "문서 매핑 내보내기",
"Export Modelfiles": "모델파일 내보내기",
"Export Prompts": "프롬프트 내보내기",
"Failed to read clipboard contents": "클립보드 내용을 읽는 데 실패했습니다.",
"File Mode": "파일 모드",
"File not found.": "파일을 찾을 수 없습니다.",
"Focus chat input": "채팅 입력 포커스",
"Format your variables using square brackets like this:": "이렇게 대괄호를 사용하여 변수를 형식화하세요:",
"From (Base Model)": "출처(기본 모델)",
"Full Screen Mode": "전체 화면 모드",
"General": "일반",
"General Settings": "일반 설정",
"Hello, {{name}}": "안녕하세요, {{name}}",
"Hide": "숨기기",
"Hide Additional Params": "추가 매개변수 숨기기",
"How can I help you today?": "오늘 어떻게 도와드릴까요?",
"Image Generation (Experimental)": "이미지 생성(실험적)",
"Image Generation Engine": "이미지 생성 엔진",
"Image Settings": "이미지 설정",
"Images": "이미지",
"Import Chats": "채팅 가져오기",
"Import Documents Mapping": "문서 매핑 가져오기",
"Import Modelfiles": "모델파일 가져오기",
"Import Prompts": "프롬프트 가져오기",
"Include --api flag when running stable-diffusion-webui": "stable-diffusion-webui를 실행할 때 --api 플래그 포함",
"Interface": "인터페이스",
"join our Discord for help.": "도움말을 보려면 Discord에 가입하세요.",
"JSON": "JSON",
"JWT Expiration": "JWT 만료",
"JWT Token": "JWT 토큰",
"Keep Alive": "계속 유지하기",
"Keyboard shortcuts": "키보드 단축키",
"Language": "언어",
"Light": "밝음",
"Listening...": "청취 중...",
"LLMs can make mistakes. Verify important information.": "LLM은 실수를 할 수 있습니다. 중요한 정보를 확인하세요.",
"Made by OpenWebUI Community": "OpenWebUI 커뮤니티에서 제작",
"Make sure to enclose them with": "다음으로 묶는 것을 잊지 마세요:",
"Manage LiteLLM Models": "LiteLLM 모델 관리",
"Manage Models": "모델 관리",
"Manage Ollama Models": "Ollama 모델 관리",
"Max Tokens": "최대 토큰 수",
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "최대 3개의 모델을 동시에 다운로드할 수 있습니다. 나중에 다시 시도하세요.",
"Mirostat": "Mirostat",
"Mirostat Eta": "Mirostat Eta",
"Mirostat Tau": "Mirostat Tau",
"Model '{{modelName}}' has been successfully downloaded.": "모델 '{{modelName}}'이(가) 성공적으로 다운로드되었습니다.",
"Model '{{modelTag}}' is already in queue for downloading.": "모델 '{{modelTag}}'이(가) 이미 다운로드 대기열에 있습니다.",
"Model {{modelId}} not found": "모델 {{modelId}}를 찾을 수 없습니다.",
"Model {{modelName}} already exists.": "모델 {{modelName}}이(가) 이미 존재합니다.",
"Model Name": "모델 이름",
"Model not selected": "모델이 선택되지 않았습니다.",
"Model Tag Name": "모델 태그 이름",
"Model Whitelisting": "모델 허용 목록",
"Model(s) Whitelisted": "허용된 모델",
"Modelfile": "모델파일",
"Modelfile Advanced Settings": "모델파일 고급 설정",
"Modelfile Content": "모델파일 내용",
"Modelfiles": "모델파일",
"Models": "모델",
"My Documents": "내 문서",
"My Modelfiles": "내 모델파일",
"My Prompts": "내 프롬프트",
"Name": "이름",
"Name Tag": "이름 태그",
"Name your modelfile": "모델파일 이름 지정",
"New Chat": "새 채팅",
"New Password": "새 비밀번호",
"Not sure what to add?": "추가할 것이 궁금하세요?",
"Not sure what to write? Switch to": "무엇을 쓸지 모르겠나요? 전환하세요.",
"Off": "끄기",
"Okay, Let's Go!": "그렇습니다, 시작합시다!",
"Ollama Base URL": "Ollama 기본 URL",
"Ollama Version": "Ollama 버전",
"On": "켜기",
"Only": "오직",
"Only alphanumeric characters and hyphens are allowed in the command string.": "명령어 문자열에는 영문자, 숫자 및 하이픈만 허용됩니다.",
"Oops! Hold tight! Your files are still in the processing oven. We're cooking them up to perfection. Please be patient and we'll let you know once they're ready.": "이런! 잠시만 기다려 주세요! 파일이 아직 처리 중입니다. 완벽하게 준비하고 있습니다. 잠시만 기다려주시면 준비가 되면 알려드리겠습니다.",
"Oops! Looks like the URL is invalid. Please double-check and try again.": "이런! URL이 잘못된 것 같습니다. 다시 한번 확인하고 다시 시도해주세요.",
"Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "이런! 지원되지 않는 방식(프론트엔드 전용)을 사용하고 계십니다. 백엔드에서 WebUI를 제공해주세요.",
"Open": "열기",
"Open AI": "Open AI",
"Open AI (Dall-E)": "OpenAI (Dall-E)",
"Open new chat": "새 채팅 열기",
"OpenAI API": "OpenAI API",
"OpenAI API Key": "OpenAI API 키",
"OpenAI API Key is required.": "OpenAI API 키가 필요합니다.",
"or": "또는",
"Parameters": "매개변수",
"Password": "비밀번호",
"PDF Extract Images (OCR)": "PDF에서 이미지 추출 (OCR)",
"pending": "보류 중",
"Permission denied when accessing microphone: {{error}}": "마이크 액세스가 거부되었습니다: {{error}}",
"Playground": "놀이터",
"Profile": "프로필",
"Prompt Content": "프롬프트 내용",
"Prompt suggestions": "프롬프트 제안",
"Prompts": "프롬프트",
"Pull a model from Ollama.com": "Ollama.com에서 모델 가져오기",
"Pull Progress": "가져오기 진행 상황",
"Query Params": "쿼리 매개변수",
"RAG Template": "RAG 템플릿",
"Raw Format": "Raw 형식",
"Record voice": "음성 녹음",
"Redirecting you to OpenWebUI Community": "OpenWebUI 커뮤니티로 리디렉션하는 중",
"Release Notes": "릴리스 노트",
"Repeat Last N": "마지막 N 반복",
"Repeat Penalty": "반복 패널티",
"Request Mode": "요청 모드",
"Reset Vector Storage": "벡터 스토리지 초기화",
"Response AutoCopy to Clipboard": "응답 자동 클립보드 복사",
"Role": "역할",
"Rosé Pine": "로제 파인",
"Rosé Pine Dawn": "로제 파인 던",
"Save": "저장",
"Save & Create": "저장 및 생성",
"Save & Submit": "저장 및 제출",
"Save & Update": "저장 및 업데이트",
"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "브라우저의 저장소에 채팅 로그를 직접 저장하는 것은 더 이상 지원되지 않습니다. 아래 버튼을 클릭하여 채팅 로그를 다운로드하고 삭제하세요. 걱정 마세요. 백엔드를 통해 채팅 로그를 쉽게 다시 가져올 수 있습니다.",
"Scan": "스캔",
"Scan complete!": "스캔 완료!",
"Scan for documents from {{path}}": "{{path}}에서 문서 스캔",
"Search": "검색",
"Search Documents": "문서 검색",
"Search Prompts": "프롬프트 검색",
"See readme.md for instructions": "설명은 readme.md를 참조하세요.",
"See what's new": "새로운 기능 보기",
"Seed": "시드",
"Select a mode": "모드 선택",
"Select a model": "모델 선택",
"Select an Ollama instance": "Ollama 인스턴스 선택",
"Send a Message": "메시지 보내기",
"Send message": "메시지 보내기",
"Server connection verified": "서버 연결 확인됨",
"Set as default": "기본값으로 설정",
"Set Default Model": "기본 모델 설정",
"Set Image Size": "이미지 크기 설정",
"Set Steps": "단계 설정",
"Set Title Auto-Generation Model": "제목 자동 생성 모델 설정",
"Set Voice": "음성 설정",
"Settings": "설정",
"Settings saved successfully!": "설정이 성공적으로 저장되었습니다!",
"Share to OpenWebUI Community": "OpenWebUI 커뮤니티에 공유",
"short-summary": "간단한 요약",
"Show": "보이기",
"Show Additional Params": "추가 매개변수 보기",
"Show shortcuts": "단축키 보기",
"sidebar": "사이드바",
"Sign in": "로그인",
"Sign Out": "로그아웃",
"Sign up": "가입",
"Speech recognition error: {{error}}": "음성 인식 오류: {{error}}",
"Speech-to-Text Engine": "음성-텍스트 엔진",
"SpeechRecognition API is not supported in this browser.": "이 브라우저에서는 SpeechRecognition API를 지원하지 않습니다.",
"Stop Sequence": "중지 시퀀스",
"STT Settings": "STT 설정",
"Submit": "제출",
"Success": "성공",
"Successfully updated.": "성공적으로 업데이트되었습니다.",
"Sync All": "모두 동기화",
"System": "시스템",
"System Prompt": "시스템 프롬프트",
"Tags": "Tags",
"Temperature": "Temperature",
"Template": "Template",
"Text Completion": "텍스트 완성",
"Text-to-Speech Engine": "텍스트-음성 엔진",
"Tfs Z": "Tfs Z",
"Theme": "테마",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "이렇게 하면 소중한 대화 내용이 백엔드 데이터베이스에 안전하게 저장됩니다. 감사합니다!",
"This setting does not sync across browsers or devices.": "이 설정은 브라우저 또는 장치 간에 동기화되지 않습니다.",
"Tip: Update multiple variable slots consecutively by pressing the tab key in the chat input after each replacement.": "팁: 각 대체 후 채팅 입력에서 탭 키를 눌러 여러 개의 변수 슬롯을 연속적으로 업데이트하세요.",
"Title": "제목",
"Title Auto-Generation": "제목 자동 생성",
"Title Generation Prompt": "제목 생성 프롬프트",
"to": "~까지",
"To access the available model names for downloading,": "다운로드 가능한 모델명을 확인하려면,",
"To access the GGUF models available for downloading,": "다운로드 가능한 GGUF 모델을 확인하려면,",
"to chat input.": "채팅 입력으로.",
"Toggle settings": "설정 전환",
"Toggle sidebar": "사이드바 전환",
"Top K": "Top K",
"Top P": "Top P",
"Trouble accessing Ollama?": "Ollama에 접근하는 데 문제가 있나요?",
"TTS Settings": "TTS 설정",
"Type Hugging Face Resolve (Download) URL": "Hugging Face Resolve (다운로드) URL 입력",
"Uh-oh! There was an issue connecting to {{provider}}.": "앗! {{provider}}에 연결하는 데 문제가 있었습니다.",
"Unknown File Type '{{file_type}}', but accepting and treating as plain text": "알 수 없는 파일 유형 '{{file_type}}', 하지만 일반 텍스트로 허용하고 처리합니다.",
"Update password": "비밀번호 업데이트",
"Upload a GGUF model": "GGUF 모델 업로드",
"Upload files": "파일 업로드",
"Upload Progress": "업로드 진행 상황",
"URL Mode": "URL 모드",
"Use '#' in the prompt input to load and select your documents.": "프롬프트 입력에서 '#'를 사용하여 문서를 로드하고 선택하세요.",
"Use Gravatar": "Gravatar 사용",
"user": "사용자",
"User Permissions": "사용자 권한",
"Users": "사용자",
"Utilize": "활용",
"Valid time units:": "유효한 시간 단위:",
"variable": "변수",
"variable to have them replaced with clipboard content.": "변수를 사용하여 클립보드 내용으로 바꾸세요.",
"Version": "버전",
"Web": "웹",
"WebUI Add-ons": "WebUI 애드온",
"WebUI Settings": "WebUI 설정",
"WebUI will make requests to": "WebUI가 요청할 대상:",
"What's New in": "새로운 기능",
"When history is turned off, new chats on this browser won't appear in your history on any of your devices.": "기록 기능이 꺼져 있으면 이 브라우저의 새 채팅이 다른 장치의 채팅 기록에 나타나지 않습니다.",
"Whisper (Local)": "위스퍼 (Local)",
"Write a prompt suggestion (e.g. Who are you?)": "프롬프트 제안 작성 (예: 당신은 누구인가요?)",
"Write a summary in 50 words that summarizes [topic or keyword].": "[주제 또는 키워드]에 대한 50단어 요약문 작성.",
"You": "당신",
"You're a helpful assistant.": "당신은 유용한 어시스턴트입니다.",
"You're now logged in.": "로그인되었습니다."
}

View file

@ -39,6 +39,10 @@
"code": "ja-JP",
"title": "Japanese"
},
{
"code": "ko-KR",
"title": "Korean"
},
{
"code": "nl-NL",
"title": "Dutch (Netherlands)"

View file

@ -7,8 +7,9 @@ export const config = writable(undefined);
export const user = writable(undefined);
// Frontend
export const theme = writable('system');
export const MODEL_DOWNLOAD_POOL = writable({});
export const theme = writable('system');
export const chatId = writable('');
export const chats = writable([]);
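The new MODEL_DOWNLOAD_POOL store starts out as an empty writable object; presumably it tracks in-flight model pulls keyed by model tag (the UI elsewhere caps simultaneous downloads at three). A hedged usage sketch, where the entry shape and the startDownload helper are assumptions rather than anything shown in this diff:

import { writable, type Writable } from 'svelte/store';

// Illustrative shape only; the diff itself just creates writable({}).
const MODEL_DOWNLOAD_POOL: Writable<Record<string, { progress: number }>> = writable({});

const startDownload = (modelTag: string) =>
  MODEL_DOWNLOAD_POOL.update((pool) => ({ ...pool, [modelTag]: { progress: 0 } }));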

View file

@ -1,10 +1,40 @@
import { v4 as uuidv4 } from 'uuid';
import sha256 from 'js-sha256';
import { getOllamaModels } from '$lib/apis/ollama';
import { getOpenAIModels } from '$lib/apis/openai';
import { getLiteLLMModels } from '$lib/apis/litellm';
export const getModels = async (token: string) => {
let models = await Promise.all([
await getOllamaModels(token).catch((error) => {
console.log(error);
return null;
}),
await getOpenAIModels(token).catch((error) => {
console.log(error);
return null;
}),
await getLiteLLMModels(token).catch((error) => {
console.log(error);
return null;
})
]);
models = models
.filter((models) => models)
.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
return models;
};
//////////////////////////
// Helper functions
//////////////////////////
export const capitalizeFirstLetter = (string) => {
return string.charAt(0).toUpperCase() + string.slice(1);
};
export const splitStream = (splitOn) => {
let buffer = '';
return new TransformStream({
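getModels above fetches the Ollama, OpenAI and LiteLLM model lists in parallel, drops any source that failed (returned null), and splices a { name: 'hr' } placeholder between sources so the selector can render a divider. A sketch of just the merge step, with plain arrays standing in for the three API calls; mergeModelLists is an illustrative name:

type ModelEntry = { name: string };

// Same filter + reduce as above: keep the successful lists, join them, and insert an
// 'hr' separator entry between adjacent sources (but not after the last one).
const mergeModelLists = (lists: (ModelEntry[] | null)[]): ModelEntry[] =>
  lists
    .filter((list): list is ModelEntry[] => Boolean(list))
    .reduce<ModelEntry[]>(
      (acc, list, i, arr) => acc.concat(list, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])),
      []
    );

// mergeModelLists([[{ name: 'llama2' }], null, [{ name: 'gpt-4' }]])
// => [{ name: 'llama2' }, { name: 'hr' }, { name: 'gpt-4' }]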

View file

@ -19,7 +19,7 @@
} from '$lib/stores';
import { copyToClipboard, splitStream } from '$lib/utils';
import { generateChatCompletion, cancelOllamaRequest, generateTitle } from '$lib/apis/ollama';
import { generateChatCompletion, cancelOllamaRequest } from '$lib/apis/ollama';
import {
addTagById,
createNewChat,
@ -30,14 +30,14 @@
updateChatById
} from '$lib/apis/chats';
import { queryCollection, queryDoc } from '$lib/apis/rag';
import { generateOpenAIChatCompletion } from '$lib/apis/openai';
import { generateOpenAIChatCompletion, generateTitle } from '$lib/apis/openai';
import MessageInput from '$lib/components/chat/MessageInput.svelte';
import Messages from '$lib/components/chat/Messages.svelte';
import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
import Navbar from '$lib/components/layout/Navbar.svelte';
import { RAGTemplate } from '$lib/utils/rag';
import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
import { LITELLM_API_BASE_URL, OLLAMA_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
import { WEBUI_BASE_URL } from '$lib/constants';
const i18n = getContext('i18n');
@ -48,6 +48,7 @@
let messagesContainerElement: HTMLDivElement;
let currentRequestId = null;
let showModelSelector = false;
let selectedModels = [''];
let selectedModelfile = null;
@ -511,12 +512,17 @@
if (messages.length == 2 && messages.at(1).content !== '') {
window.history.replaceState(history.state, '', `/c/${_chatId}`);
await generateChatTitle(_chatId, userPrompt);
const _title = await generateChatTitle(userPrompt);
await setChatTitle(_chatId, _title);
}
};
const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
const responseMessage = history.messages[responseMessageId];
// Wait until history/message have been updated
await tick();
scrollToBottom();
const docs = messages
@ -528,6 +534,8 @@
console.log(docs);
console.log(model);
const res = await generateOpenAIChatCompletion(
localStorage.token,
{
@ -580,7 +588,9 @@
max_tokens: $settings?.options?.num_predict ?? undefined,
docs: docs.length > 0 ? docs : undefined
},
model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
model?.source?.toLowerCase() === 'litellm'
? `${LITELLM_API_BASE_URL}/v1`
: `${OPENAI_API_BASE_URL}`
);
if (res && res.ok) {
@ -696,11 +706,8 @@
if (messages.length == 2) {
window.history.replaceState(history.state, '', `/c/${_chatId}`);
if ($settings?.titleAutoGenerateModel) {
await generateChatTitle(_chatId, userPrompt);
} else {
await setChatTitle(_chatId, userPrompt);
}
const _title = await generateChatTitle(userPrompt);
await setChatTitle(_chatId, _title);
}
};
@ -754,23 +761,46 @@
}
};
const generateChatTitle = async (_chatId, userPrompt) => {
if ($settings.titleAutoGenerate ?? true) {
const generateChatTitle = async (userPrompt) => {
if ($settings?.title?.auto ?? true) {
const model = $models.find((model) => model.id === selectedModels[0]);
const titleModelId =
model?.external ?? false
? $settings?.title?.modelExternal ?? selectedModels[0]
: $settings?.title?.model ?? selectedModels[0];
const titleModel = $models.find((model) => model.id === titleModelId);
console.log(titleModel);
const title = await generateTitle(
localStorage.token,
$settings?.titleGenerationPrompt ??
$settings?.title?.prompt ??
$i18n.t(
"Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':"
) + ' {{prompt}}',
$settings?.titleAutoGenerateModel ?? selectedModels[0],
userPrompt
titleModelId,
userPrompt,
titleModel?.external ?? false
? titleModel?.source?.toLowerCase() === 'litellm'
? `${LITELLM_API_BASE_URL}/v1`
: `${OPENAI_API_BASE_URL}`
: `${OLLAMA_API_BASE_URL}/v1`
);
if (title) {
await setChatTitle(_chatId, title);
}
return title;
} else {
await setChatTitle(_chatId, `${userPrompt}`);
return `${userPrompt}`;
}
};
const setChatTitle = async (_chatId, _title) => {
if (_chatId === $chatId) {
title = _title;
}
if ($settings.saveChatHistory ?? true) {
chat = await updateChatById(localStorage.token, _chatId, { title: _title });
await chats.set(await getChatList(localStorage.token));
}
};
@ -801,17 +831,6 @@
_tags.set(await getAllChatTags(localStorage.token));
};
const setChatTitle = async (_chatId, _title) => {
if (_chatId === $chatId) {
title = _title;
}
if ($settings.saveChatHistory ?? true) {
chat = await updateChatById(localStorage.token, _chatId, { title: _title });
await chats.set(await getChatList(localStorage.token));
}
};
</script>
<svelte:head>
@ -823,7 +842,16 @@
</svelte:head>
<div class="h-screen max-h-[100dvh] w-full flex flex-col">
<Navbar {title} shareEnabled={messages.length > 0} {initNewChat} {tags} {addTag} {deleteTag} />
<Navbar
{title}
bind:selectedModels
bind:showModelSelector
shareEnabled={messages.length > 0}
{initNewChat}
{tags}
{addTag}
{deleteTag}
/>
<div class="flex flex-col flex-auto">
<div
class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0"
@ -835,15 +863,7 @@
messagesContainerElement.clientHeight + 5;
}}
>
<div
class="{$settings?.fullScreenMode ?? null
? 'max-w-full'
: 'max-w-2xl md:px-0'} mx-auto w-full px-4"
>
<ModelSelector bind:selectedModels />
</div>
<div class=" h-full w-full flex flex-col py-8">
<div class=" h-full w-full flex flex-col pt-2 pb-4">
<Messages
chatId={$chatId}
{selectedModels}
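One thing worth calling out in the refactor above: generateTitle now comes from the OpenAI-compatible client, and both the title model and the base URL are derived from the first selected chat model. External models use settings.title.modelExternal and are routed through the LiteLLM or OpenAI proxy, while local models use settings.title.model and Ollama's OpenAI-compatible /v1 endpoint. A condensed sketch of the endpoint selection only; titleEndpointFor is an illustrative helper, the constants are the same ones imported above:

import { LITELLM_API_BASE_URL, OLLAMA_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';

// Mirrors the nested ternary used by generateChatTitle above.
const titleEndpointFor = (titleModel?: { external?: boolean; source?: string }): string =>
  titleModel?.external ?? false
    ? titleModel?.source?.toLowerCase() === 'litellm'
      ? `${LITELLM_API_BASE_URL}/v1`
      : `${OPENAI_API_BASE_URL}`
    : `${OLLAMA_API_BASE_URL}/v1`;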

View file

@ -19,7 +19,7 @@
} from '$lib/stores';
import { copyToClipboard, splitStream, convertMessagesToHistory } from '$lib/utils';
import { generateChatCompletion, generateTitle, cancelOllamaRequest } from '$lib/apis/ollama';
import { generateChatCompletion, cancelOllamaRequest } from '$lib/apis/ollama';
import {
addTagById,
createNewChat,
@ -31,14 +31,19 @@
updateChatById
} from '$lib/apis/chats';
import { queryCollection, queryDoc } from '$lib/apis/rag';
import { generateOpenAIChatCompletion } from '$lib/apis/openai';
import { generateOpenAIChatCompletion, generateTitle } from '$lib/apis/openai';
import MessageInput from '$lib/components/chat/MessageInput.svelte';
import Messages from '$lib/components/chat/Messages.svelte';
import ModelSelector from '$lib/components/chat/ModelSelector.svelte';
import Navbar from '$lib/components/layout/Navbar.svelte';
import { RAGTemplate } from '$lib/utils/rag';
import { LITELLM_API_BASE_URL, OPENAI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
import {
LITELLM_API_BASE_URL,
OPENAI_API_BASE_URL,
OLLAMA_API_BASE_URL,
WEBUI_BASE_URL
} from '$lib/constants';
const i18n = getContext('i18n');
@ -51,8 +56,10 @@
let currentRequestId = null;
// let chatId = $page.params.id;
let showModelSelector = false;
let selectedModels = [''];
let selectedModelfile = null;
$: selectedModelfile =
selectedModels.length === 1 &&
$modelfiles.filter((modelfile) => modelfile.tagName === selectedModels[0]).length > 0
@ -521,12 +528,17 @@
if (messages.length == 2 && messages.at(1).content !== '') {
window.history.replaceState(history.state, '', `/c/${_chatId}`);
await generateChatTitle(_chatId, userPrompt);
const _title = await generateChatTitle(userPrompt);
await setChatTitle(_chatId, _title);
}
};
const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
const responseMessage = history.messages[responseMessageId];
// Wait until history/message have been updated
await tick();
scrollToBottom();
const docs = messages
@ -590,7 +602,9 @@
max_tokens: $settings?.options?.num_predict ?? undefined,
docs: docs.length > 0 ? docs : undefined
},
model.source === 'litellm' ? `${LITELLM_API_BASE_URL}/v1` : `${OPENAI_API_BASE_URL}`
model?.source?.toLowerCase() === 'litellm'
? `${LITELLM_API_BASE_URL}/v1`
: `${OPENAI_API_BASE_URL}`
);
if (res && res.ok) {
@ -706,11 +720,8 @@
if (messages.length == 2) {
window.history.replaceState(history.state, '', `/c/${_chatId}`);
if ($settings?.titleAutoGenerateModel) {
await generateChatTitle(_chatId, userPrompt);
} else {
await setChatTitle(_chatId, userPrompt);
}
const _title = await generateChatTitle(userPrompt);
await setChatTitle(_chatId, _title);
}
};
@ -719,6 +730,19 @@
console.log('stopResponse');
};
const regenerateResponse = async () => {
console.log('regenerateResponse');
if (messages.length != 0 && messages.at(-1).done == true) {
messages.splice(messages.length - 1, 1);
messages = messages;
let userMessage = messages.at(-1);
let userPrompt = userMessage.content;
await sendPrompt(userPrompt, userMessage.id);
}
};
const continueGeneration = async () => {
console.log('continueGeneration');
const _chatId = JSON.parse(JSON.stringify($chatId));
@ -751,36 +775,35 @@
}
};
const regenerateResponse = async () => {
console.log('regenerateResponse');
if (messages.length != 0 && messages.at(-1).done == true) {
messages.splice(messages.length - 1, 1);
messages = messages;
const generateChatTitle = async (userPrompt) => {
if ($settings?.title?.auto ?? true) {
const model = $models.find((model) => model.id === selectedModels[0]);
let userMessage = messages.at(-1);
let userPrompt = userMessage.content;
const titleModelId =
model?.external ?? false
? $settings?.title?.modelExternal ?? selectedModels[0]
: $settings?.title?.model ?? selectedModels[0];
const titleModel = $models.find((model) => model.id === titleModelId);
await sendPrompt(userPrompt, userMessage.id);
}
};
const generateChatTitle = async (_chatId, userPrompt) => {
if ($settings.titleAutoGenerate ?? true) {
console.log(titleModel);
const title = await generateTitle(
localStorage.token,
$settings?.titleGenerationPrompt ??
$settings?.title?.prompt ??
$i18n.t(
"Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':"
) + ' {{prompt}}',
$settings?.titleAutoGenerateModel ?? selectedModels[0],
userPrompt
titleModelId,
userPrompt,
titleModel?.external ?? false
? titleModel?.source?.toLowerCase() === 'litellm'
? `${LITELLM_API_BASE_URL}/v1`
: `${OPENAI_API_BASE_URL}`
: `${OLLAMA_API_BASE_URL}/v1`
);
if (title) {
await setChatTitle(_chatId, title);
}
return title;
} else {
await setChatTitle(_chatId, `${userPrompt}`);
return `${userPrompt}`;
}
};
@ -789,8 +812,10 @@
title = _title;
}
if ($settings.saveChatHistory ?? true) {
chat = await updateChatById(localStorage.token, _chatId, { title: _title });
await chats.set(await getChatList(localStorage.token));
}
};
const getTags = async () => {
@ -840,6 +865,8 @@
<div class="min-h-screen max-h-screen w-full flex flex-col">
<Navbar
{title}
bind:selectedModels
bind:showModelSelector
shareEnabled={messages.length > 0}
initNewChat={async () => {
if (currentRequestId !== null) {
@ -864,15 +891,7 @@
messagesContainerElement.clientHeight + 5;
}}
>
<div
class="{$settings?.fullScreenMode ?? null
? 'max-w-full'
: 'max-w-2xl md:px-0'} mx-auto w-full px-4"
>
<ModelSelector bind:selectedModels />
</div>
<div class=" h-full w-full flex flex-col py-8">
<div class=" h-full w-full flex flex-col py-4">
<Messages
chatId={$chatId}
{selectedModels}
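Both chat routes now read their title options from a nested settings.title object instead of the old flat titleAutoGenerate / titleAutoGenerateModel / titleGenerationPrompt keys. The shape below is inferred from the reads in the two files above and is not spelled out anywhere in this diff:

// Assumed shape of the nested title settings (inferred, illustrative):
type TitleSettings = {
  auto?: boolean; // replaces settings.titleAutoGenerate
  model?: string; // local title model (replaces settings.titleAutoGenerateModel)
  modelExternal?: string; // title model used when the selected chat model is external
  prompt?: string; // replaces settings.titleGenerationPrompt
};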

View file

@ -18,6 +18,7 @@
import { splitStream } from '$lib/utils';
import ChatCompletion from '$lib/components/playground/ChatCompletion.svelte';
import Selector from '$lib/components/chat/ModelSelector/Selector.svelte';
const i18n = getContext('i18n');
@ -315,27 +316,24 @@
</div>
</div>
<div class=" flex gap-1 px-1">
<select
id="models"
class="outline-none bg-transparent text-sm font-medium rounded-lg w-full placeholder-gray-400"
<div class="flex flex-col gap-1 px-1 w-full">
<div class="flex w-full">
<div class="overflow-hidden w-full">
<div class="max-w-full">
<Selector
placeholder={$i18n.t('Select a model')}
items={$models
.filter((model) => model.name !== 'hr')
.map((model) => ({
value: model.id,
label: model.name,
info: model
}))}
bind:value={selectedModelId}
>
<option class=" text-gray-800" value="" selected disabled
>{$i18n.t('Select a model')}</option
>
{#each $models as model}
{#if model.name === 'hr'}
<hr />
{:else}
<option value={model.id} class="text-gray-800 text-lg"
>{model.name +
`${model.size ? ` (${(model.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}</option
>
{/if}
{/each}
</select>
/>
</div>
</div>
</div>
<!-- <button
class=" self-center dark:hover:text-gray-300"

View file

@ -11,7 +11,7 @@
import '../tailwind.css';
import 'tippy.js/dist/tippy.css';
import { WEBUI_BASE_URL } from '$lib/constants';
import i18n from '$lib/i18n';
import i18n, { initI18n } from '$lib/i18n';
setContext('i18n', i18n);
@ -25,6 +25,11 @@
if (backendConfig) {
// Save Backend Status to Store
await config.set(backendConfig);
if ($config.default_locale) {
initI18n($config.default_locale);
} else {
initI18n();
}
await WEBUI_NAME.set(backendConfig.name);
console.log(backendConfig);

View file

@ -16,7 +16,15 @@ const config = {
assets: 'build',
fallback: 'index.html'
})
}
},
onwarn: (warning, handler) => {
const { code } = warning;
if (code === 'css-unused-selector') return;
handler(warning);
},
};
export default config;

View file

@ -16,9 +16,8 @@ export default {
700: '#4e4e4e',
800: '#333',
850: '#262626',
900: '#171717',
950: '#0d0d0d'
900: 'var(--color-gray-900, #171717)',
950: 'var(--color-gray-950, #0d0d0d)'
}
},
typography: {
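The gray-900 and gray-950 entries now resolve through CSS custom properties with the previous hex values as fallbacks, which appears to be the hook for the new OLED dark theme: a theme can darken just the deepest shades from CSS without rebuilding Tailwind. A minimal sketch of such an override; the values and the place it runs from are illustrative, only the variable names come from the config above:

// Sketch: an OLED-style override only needs to redefine the variables the config falls back on.
document.documentElement.style.setProperty('--color-gray-900', '#101010');
document.documentElement.style.setProperty('--color-gray-950', '#000000');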