Merge pull request #1488 from open-webui/dev

0.1.118
Timothy Jaeryang Baek 2024-04-10 15:38:47 -07:00 committed by GitHub
commit 78284e49d7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
61 changed files with 1837 additions and 723 deletions

View file

@@ -57,3 +57,14 @@ jobs:
path: .
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Trigger Docker build workflow
uses: actions/github-script@v7
with:
script: |
github.rest.actions.createWorkflowDispatch({
owner: context.repo.owner,
repo: context.repo.repo,
workflow_id: 'docker-build.yaml',
ref: 'v${{ steps.get_version.outputs.version }}',
})
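
The step above dispatches the `docker-build.yaml` workflow against the freshly created version tag. A roughly equivalent manual trigger from a workstation, assuming the GitHub CLI is installed and the tag already exists (the tag name here is illustrative), would be:

```bash
# Manually dispatch the Docker build workflow for an existing tag (illustrative tag name)
gh workflow run docker-build.yaml --repo open-webui/open-webui --ref v0.1.118
```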

View file

@@ -1,8 +1,7 @@
# name: Create and publish Docker images with specific build args
name: Create and publish a Docker image
# Configures this workflow to run every time a change is pushed to the branch called `release`.
on:
workflow_dispatch:
push:
branches:
- main
@@ -10,30 +9,39 @@ on:
tags:
- v*
# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
FULL_IMAGE_NAME: ghcr.io/${{ github.repository }}
# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
jobs:
build-and-push-image:
build-main-image:
runs-on: ubuntu-latest
# Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
platform:
- linux/amd64
- linux/arm64
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout repository
uses: actions/checkout@v4
# Required for multi architecture build
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Required for multi architecture build
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
@@ -41,12 +49,11 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Docker images
- name: Extract metadata for Docker images (default latest tag)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
images: ${{ env.FULL_IMAGE_NAME }}
# This configuration dynamically generates tags based on the branch, tag, commit, and custom suffix for lite version.
tags: |
type=ref,event=branch
type=ref,event=tag
@@ -56,11 +63,322 @@ jobs:
flavor: |
latest=${{ github.ref == 'refs/heads/main' }}
- name: Build and push Docker image
- name: Build Docker image (latest)
uses: docker/build-push-action@v5
id: build
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
platforms: ${{ matrix.platform }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-main-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
build-cuda-image:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
platform:
- linux/amd64
- linux/arm64
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Docker images (default latest tag)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.FULL_IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=sha,prefix=git-
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=cuda
flavor: |
latest=${{ github.ref == 'refs/heads/main' }}
suffix=-cuda,onlatest=true
- name: Build Docker image (cuda)
uses: docker/build-push-action@v5
id: build
with:
context: .
push: true
platforms: ${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: type=gha
cache-to: type=gha,mode=max
build-args: USE_CUDA=true
- name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-cuda-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
build-ollama-image:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
strategy:
fail-fast: false
matrix:
platform:
- linux/amd64
- linux/arm64
steps:
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Docker images (ollama tag)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.FULL_IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=sha,prefix=git-
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=ollama
flavor: |
latest=${{ github.ref == 'refs/heads/main' }}
suffix=-ollama,onlatest=true
- name: Build Docker image (ollama)
uses: docker/build-push-action@v5
id: build
with:
context: .
push: true
platforms: ${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
cache-from: type=gha
cache-to: type=gha,mode=max
build-args: USE_OLLAMA=true
- name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-ollama-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
merge-main-images:
runs-on: ubuntu-latest
needs: [ build-main-image ]
steps:
- name: Download digests
uses: actions/download-artifact@v4
with:
pattern: digests-main-*
path: /tmp/digests
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Docker images (default latest tag)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.FULL_IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=sha,prefix=git-
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
flavor: |
latest=${{ github.ref == 'refs/heads/main' }}
- name: Create manifest list and push
working-directory: /tmp/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.FULL_IMAGE_NAME }}@sha256:%s ' *)
- name: Inspect image
run: |
docker buildx imagetools inspect ${{ env.FULL_IMAGE_NAME }}:${{ steps.meta.outputs.version }}
merge-cuda-images:
runs-on: ubuntu-latest
needs: [ build-cuda-image ]
steps:
- name: Download digests
uses: actions/download-artifact@v4
with:
pattern: digests-cuda-*
path: /tmp/digests
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Docker images (default latest tag)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.FULL_IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=sha,prefix=git-
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=cuda
flavor: |
latest=${{ github.ref == 'refs/heads/main' }}
suffix=-cuda,onlatest=true
- name: Create manifest list and push
working-directory: /tmp/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.FULL_IMAGE_NAME }}@sha256:%s ' *)
- name: Inspect image
run: |
docker buildx imagetools inspect ${{ env.FULL_IMAGE_NAME }}:${{ steps.meta.outputs.version }}
merge-ollama-images:
runs-on: ubuntu-latest
needs: [ build-ollama-image ]
steps:
- name: Download digests
uses: actions/download-artifact@v4
with:
pattern: digests-ollama-*
path: /tmp/digests
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Docker images (default ollama tag)
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.FULL_IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=tag
type=sha,prefix=git-
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=ollama
flavor: |
latest=${{ github.ref == 'refs/heads/main' }}
suffix=-ollama,onlatest=true
- name: Create manifest list and push
working-directory: /tmp/digests
run: |
docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
$(printf '${{ env.FULL_IMAGE_NAME }}@sha256:%s ' *)
- name: Inspect image
run: |
docker buildx imagetools inspect ${{ env.FULL_IMAGE_NAME }}:${{ steps.meta.outputs.version }}
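
The three build jobs above push per-architecture images by digest only (`push-by-digest=true`), and the matching merge jobs stitch those digests into a single multi-arch manifest list with `docker buildx imagetools create`. A quick way to confirm the result, assuming Docker with Buildx is available locally, is to inspect the published tag:

```bash
# Inspect the merged manifest list; both linux/amd64 and linux/arm64 entries should be listed
docker buildx imagetools inspect ghcr.io/open-webui/open-webui:main
```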

View file

@@ -5,6 +5,26 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.1.118] - 2024-04-10
### Added
- **🦙 Ollama and CUDA Images**: Added support for `:ollama` and `:cuda` tagged images.
- **👍 Enhanced Response Rating**: Now you can annotate your ratings for better feedback.
- **👤 User Initials Profile Photo**: User initials are now the default profile photo.
- **🔍 Update RAG Embedding Model**: Customize RAG embedding model directly in document settings.
- **🌍 Additional Language Support**: Added Turkish language support.
### Fixed
- **🔒 Share Chat Permission**: Resolved issue with chat sharing permissions.
- **🛠 Modal Close**: Modals can now be closed using the Esc key.
### Changed
- **🎨 Admin Panel Styling**: Refreshed styling for the admin panel.
- **🐳 Docker Image Build**: Updated docker image build process for improved efficiency.
## [0.1.117] - 2024-04-03
### Added

View file

@@ -1,82 +1,116 @@
# syntax=docker/dockerfile:1
# Initialize device type args
# use build args in the docker build command with --build-arg="BUILDARG=true"
ARG USE_CUDA=false
ARG USE_OLLAMA=false
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
ARG USE_CUDA_VER=cu121
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
# for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
# IMPORTANT: If you change the default model (all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
ARG USE_EMBEDDING_MODEL=all-MiniLM-L6-v2
FROM node:alpine as build
######## WebUI frontend ########
FROM --platform=$BUILDPLATFORM node:21-alpine3.19 as build
WORKDIR /app
# wget embedding model weight from alpine (does not exist from slim-buster)
RUN wget "https://chroma-onnx-models.s3.amazonaws.com/all-MiniLM-L6-v2/onnx.tar.gz" -O - | \
tar -xzf - -C /app
COPY package.json package-lock.json ./
RUN npm ci
COPY . .
RUN npm run build
######## WebUI backend ########
FROM python:3.11-slim-bookworm as base
ENV ENV=prod
# Use args
ENV PORT ""
ARG USE_CUDA
ARG USE_OLLAMA
ARG USE_CUDA_VER
ARG USE_EMBEDDING_MODEL
ENV OLLAMA_BASE_URL "/ollama"
## Basis ##
ENV ENV=prod \
PORT=8080 \
# pass build args to the build
USE_OLLAMA_DOCKER=${USE_OLLAMA} \
USE_CUDA_DOCKER=${USE_CUDA} \
USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL}
ENV OPENAI_API_BASE_URL ""
## Basis URL Config ##
ENV OPENAI_API_KEY ""
ENV OLLAMA_BASE_URL="/ollama" \
OPENAI_API_BASE_URL=""
ENV WEBUI_SECRET_KEY ""
## API Key and Security Config ##
ENV WEBUI_AUTH_TRUSTED_EMAIL_HEADER ""
ENV OPENAI_API_KEY="" \
WEBUI_SECRET_KEY="" \
ENV SCARF_NO_ANALYTICS true
SCARF_NO_ANALYTICS=true \
ENV DO_NOT_TRACK true
DO_NOT_TRACK=true
# Use locally bundled version of the LiteLLM cost map json
# to avoid repetitive startup connections
ENV LITELLM_LOCAL_MODEL_COST_MAP="True"
######## Preloaded models ########
# whisper TTS Settings
ENV WHISPER_MODEL="base"
ENV WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
# RAG Embedding Model Settings
#### Other models #########################################################
# any sentence transformer model; models to use can be found at https://huggingface.co/models?library=sentence-transformers
## whisper TTS model settings ##
# Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
ENV WHISPER_MODEL="base" \
# for better performance and multilanguage support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
# IMPORTANT: If you change the default model (all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
ENV RAG_EMBEDDING_MODEL="all-MiniLM-L6-v2"
# device type for whisper tts and embedding models - "cpu" (default), "cuda" (nvidia gpu and CUDA required) or "mps" (apple silicon) - choosing this right can lead to better performance
ENV RAG_EMBEDDING_MODEL_DEVICE_TYPE="cpu"
ENV RAG_EMBEDDING_MODEL_DIR="/app/backend/data/cache/embedding/models"
ENV SENTENCE_TRANSFORMERS_HOME $RAG_EMBEDDING_MODEL_DIR
######## Preloaded models ########
## RAG Embedding model settings ##
ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
RAG_EMBEDDING_MODEL_DIR="/app/backend/data/cache/embedding/models" \
SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
#### Other models ##########################################################
WORKDIR /app/backend
RUN if [ "$USE_OLLAMA" = "true" ]; then \
apt-get update && \
# Install pandoc and netcat
apt-get install -y --no-install-recommends pandoc netcat-openbsd && \
# for RAG OCR
apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
# install helper tools
apt-get install -y --no-install-recommends curl && \
# install ollama
curl -fsSL https://ollama.com/install.sh | sh && \
# cleanup
rm -rf /var/lib/apt/lists/*; \
else \
apt-get update && \
# Install pandoc and netcat
apt-get install -y --no-install-recommends pandoc netcat-openbsd && \
# for RAG OCR
apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
# cleanup
rm -rf /var/lib/apt/lists/*; \
fi
# install python dependencies
COPY ./backend/requirements.txt ./requirements.txt
RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 -y
RUN if [ "$USE_CUDA" = "true" ]; then \
# If you use CUDA the whisper and embedding model will be downloaded on first use
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
pip3 install -r requirements.txt --no-cache-dir && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
python -c "import os; from chromadb.utils import embedding_functions; sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=os.environ['RAG_EMBEDDING_MODEL'], device='cpu')"; \
else \
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
pip3 install -r requirements.txt --no-cache-dir && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
python -c "import os; from chromadb.utils import embedding_functions; sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=os.environ['RAG_EMBEDDING_MODEL'], device='cpu')"; \
fi
RUN pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir
RUN pip3 install -r requirements.txt --no-cache-dir
# Install pandoc and netcat
# RUN python -c "import pypandoc; pypandoc.download_pandoc()"
RUN apt-get update \
&& apt-get install -y pandoc netcat-openbsd \
&& rm -rf /var/lib/apt/lists/*
# preload embedding model
RUN python -c "import os; from chromadb.utils import embedding_functions; sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=os.environ['RAG_EMBEDDING_MODEL'], device=os.environ['RAG_EMBEDDING_MODEL_DEVICE_TYPE'])"
# preload tts model
RUN python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='auto', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"
# copy embedding weight from build
RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
# RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx
# COPY --from=build /app/onnx /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx
# copy built frontend files
COPY --from=build /app/build /app/build
@@ -86,4 +120,6 @@ COPY --from=build /app/package.json /app/package.json
# copy backend files
COPY ./backend .
EXPOSE 8080
CMD [ "bash", "start.sh"]

View file

@@ -94,24 +94,27 @@ Don't forget to explore our sibling project, [Open WebUI Community](https://open
### Quick Start with Docker 🐳
> [!WARNING]
> When using Docker to install Open WebUI, make sure to include the `-v open-webui:/app/backend/data` in your Docker command. This step is crucial as it ensures your database is properly mounted and prevents any loss of data.
> [!TIP]
> If you wish to utilize Open WebUI with Ollama included or CUDA acceleration, we recommend utilizing our official images tagged with either `:cuda` or `:ollama`. To enable CUDA, you must install the [Nvidia CUDA container toolkit](https://docs.nvidia.com/dgx/nvidia-container-runtime-upgrade/) on your Linux/WSL system.
**If Ollama is on your computer**, use this command:
```bash
docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```
**If Ollama is on a Different Server**, use this command:
To connect to Ollama on another server, change the `OLLAMA_BASE_URL` to the server's URL:
```bash
docker run -d -p 3000:8080 -e OLLAMA_BASE_URL=https://example.com -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```
After installation, you can access Open WebUI at [http://localhost:3000](http://localhost:3000). Enjoy! 😄
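
For the new `:cuda` and `:ollama` variants mentioned in the tip above, hedged example commands might look like the following (GPU passthrough assumes the NVIDIA Container Toolkit is installed; exact flags depend on your setup):

```bash
# Open WebUI with bundled Ollama in a single container
docker run -d -p 3000:8080 -v ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:ollama

# CUDA-accelerated image (requires the NVIDIA Container Toolkit)
docker run -d -p 3000:8080 --gpus all --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:cuda
```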
#### Open WebUI: Server Connection Error

View file

@@ -28,6 +28,7 @@ from config import (
UPLOAD_DIR,
WHISPER_MODEL,
WHISPER_MODEL_DIR,
DEVICE_TYPE,
)
log = logging.getLogger(__name__)
@@ -42,6 +43,10 @@ app.add_middleware(
allow_headers=["*"],
)
# setting device type for whisper model
whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
log.info(f"whisper_device_type: {whisper_device_type}")
@app.post("/transcribe")
def transcribe(
@@ -66,7 +71,7 @@ def transcribe(
model = WhisperModel(
WHISPER_MODEL,
device="auto",
device=whisper_device_type,
compute_type="int8",
download_root=WHISPER_MODEL_DIR,
)

View file

@@ -215,7 +215,8 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
if len(responses) > 0:
lowest_version = min(
responses, key=lambda x: tuple(map(int, x["version"].split(".")))
responses,
key=lambda x: tuple(map(int, x["version"].split("-")[0].split("."))),
)
return {"version": lowest_version["version"]}

View file

@@ -13,8 +13,8 @@ import os, shutil, logging, re
from pathlib import Path
from typing import List
from sentence_transformers import SentenceTransformer
from chromadb.utils import embedding_functions
from chromadb.utils.batch_utils import create_batches
from langchain_community.document_loaders import (
WebBaseLoader,
@@ -45,7 +45,7 @@ from apps.web.models.documents import (
DocumentResponse,
)
from apps.rag.utils import query_doc, query_collection
from apps.rag.utils import query_doc, query_collection, get_embedding_model_path
from utils.misc import (
calculate_sha256,
@@ -59,7 +59,8 @@ from config import (
UPLOAD_DIR,
DOCS_DIR,
RAG_EMBEDDING_MODEL,
RAG_EMBEDDING_MODEL_DEVICE_TYPE,
RAG_EMBEDDING_MODEL_AUTO_UPDATE,
DEVICE_TYPE,
CHROMA_CLIENT,
CHUNK_SIZE,
CHUNK_OVERLAP,
@@ -71,28 +72,25 @@ from constants import ERROR_MESSAGES
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])
#
# if RAG_EMBEDDING_MODEL:
# sentence_transformer_ef = SentenceTransformer(
# model_name_or_path=RAG_EMBEDDING_MODEL,
# cache_folder=RAG_EMBEDDING_MODEL_DIR,
# device=RAG_EMBEDDING_MODEL_DEVICE_TYPE,
# )
app = FastAPI()
app.state.PDF_EXTRACT_IMAGES = False
app.state.CHUNK_SIZE = CHUNK_SIZE
app.state.CHUNK_OVERLAP = CHUNK_OVERLAP
app.state.RAG_TEMPLATE = RAG_TEMPLATE
app.state.RAG_EMBEDDING_MODEL = RAG_EMBEDDING_MODEL
app.state.TOP_K = 4
app.state.sentence_transformer_ef = (
embedding_functions.SentenceTransformerEmbeddingFunction(
model_name=app.state.RAG_EMBEDDING_MODEL,
model_name=get_embedding_model_path(
device=RAG_EMBEDDING_MODEL_DEVICE_TYPE,
app.state.RAG_EMBEDDING_MODEL, RAG_EMBEDDING_MODEL_AUTO_UPDATE
),
device=DEVICE_TYPE,
)
)
@@ -143,19 +141,34 @@ class EmbeddingModelUpdateForm(BaseModel):
async def update_embedding_model(
form_data: EmbeddingModelUpdateForm, user=Depends(get_admin_user)
):
app.state.RAG_EMBEDDING_MODEL = form_data.embedding_model
app.state.sentence_transformer_ef = (
log.info(
f"Updating embedding model: {app.state.RAG_EMBEDDING_MODEL} to {form_data.embedding_model}"
)
try:
sentence_transformer_ef = (
embedding_functions.SentenceTransformerEmbeddingFunction(
model_name=app.state.RAG_EMBEDDING_MODEL,
model_name=get_embedding_model_path(form_data.embedding_model, True),
device=RAG_EMBEDDING_MODEL_DEVICE_TYPE,
device=DEVICE_TYPE,
)
)
app.state.RAG_EMBEDDING_MODEL = form_data.embedding_model
app.state.sentence_transformer_ef = sentence_transformer_ef
return {
"status": True,
"embedding_model": app.state.RAG_EMBEDDING_MODEL,
}
except Exception as e:
log.exception(f"Problem updating embedding model: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ERROR_MESSAGES.DEFAULT(e),
)
@app.get("/config") @app.get("/config")
async def get_rag_config(user=Depends(get_admin_user)): async def get_rag_config(user=Depends(get_admin_user)):
@ -341,9 +354,14 @@ def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> b
embedding_function=app.state.sentence_transformer_ef, embedding_function=app.state.sentence_transformer_ef,
) )
collection.add( for batch in create_batches(
documents=texts, metadatas=metadatas, ids=[str(uuid.uuid1()) for _ in texts] api=CHROMA_CLIENT,
) ids=[str(uuid.uuid1()) for _ in texts],
metadatas=metadatas,
documents=texts,
):
collection.add(*batch)
return True return True
except Exception as e: except Exception as e:
log.exception(e) log.exception(e)

View file

@@ -1,6 +1,8 @@
import os
import re
import logging
from typing import List
from huggingface_hub import snapshot_download
from config import SRC_LOG_LEVELS, CHROMA_CLIENT
@@ -188,3 +190,43 @@ def rag_messages(docs, messages, template, k, embedding_function):
messages[last_user_message_idx] = new_user_message
return messages
def get_embedding_model_path(
embedding_model: str, update_embedding_model: bool = False
):
# Construct huggingface_hub kwargs with local_files_only to return the snapshot path
cache_dir = os.getenv("SENTENCE_TRANSFORMERS_HOME")
local_files_only = not update_embedding_model
snapshot_kwargs = {
"cache_dir": cache_dir,
"local_files_only": local_files_only,
}
log.debug(f"embedding_model: {embedding_model}")
log.debug(f"snapshot_kwargs: {snapshot_kwargs}")
# Inspiration from upstream sentence_transformers
if (
os.path.exists(embedding_model)
or ("\\" in embedding_model or embedding_model.count("/") > 1)
and local_files_only
):
# If fully qualified path exists, return input, else set repo_id
return embedding_model
elif "/" not in embedding_model:
# Set valid repo_id for model short-name
embedding_model = "sentence-transformers" + "/" + embedding_model
snapshot_kwargs["repo_id"] = embedding_model
# Attempt to query the huggingface_hub library to determine the local path and/or to update
try:
embedding_model_repo_path = snapshot_download(**snapshot_kwargs)
log.debug(f"embedding_model_repo_path: {embedding_model_repo_path}")
return embedding_model_repo_path
except Exception as e:
log.exception(f"Cannot determine embedding model snapshot path: {e}")
return embedding_model

View file

@@ -86,6 +86,7 @@ class SignupForm(BaseModel):
name: str
email: str
password: str
profile_image_url: Optional[str] = "/user.png"
class AuthsTable:
@@ -94,7 +95,12 @@ class AuthsTable:
self.db.create_tables([Auth])
def insert_new_auth(
self, email: str, password: str, name: str, role: str = "pending"
self,
email: str,
password: str,
name: str,
profile_image_url: str = "/user.png",
role: str = "pending",
) -> Optional[UserModel]:
log.info("insert_new_auth")
@@ -105,7 +111,7 @@ class AuthsTable:
)
result = Auth.create(**auth.model_dump())
user = Users.insert_new_user(id, name, email, role)
user = Users.insert_new_user(id, name, email, profile_image_url, role)
if result and user:
return user

View file

@@ -206,6 +206,18 @@ class ChatTable:
except:
return None
def get_chat_by_share_id(self, id: str) -> Optional[ChatModel]:
try:
chat = Chat.get(Chat.share_id == id)
if chat:
chat = Chat.get(Chat.id == id)
return ChatModel(**model_to_dict(chat))
else:
return None
except:
return None
def get_chat_by_id_and_user_id(self, id: str, user_id: str) -> Optional[ChatModel]:
try:
chat = Chat.get(Chat.id == id, Chat.user_id == user_id)

View file

@@ -31,7 +31,7 @@ class UserModel(BaseModel):
name: str
email: str
role: str = "pending"
profile_image_url: str = "/user.png"
profile_image_url: str
timestamp: int # timestamp in epoch
api_key: Optional[str] = None
@@ -59,7 +59,12 @@ class UsersTable:
self.db.create_tables([User])
def insert_new_user(
self, id: str, name: str, email: str, role: str = "pending"
self,
id: str,
name: str,
email: str,
profile_image_url: str = "/user.png",
role: str = "pending",
) -> Optional[UserModel]:
user = UserModel(
**{
@@ -67,7 +72,7 @@ class UsersTable:
"name": name,
"email": email,
"role": role,
"profile_image_url": "/user.png",
"profile_image_url": profile_image_url,
"timestamp": int(time.time()),
}
)

View file

@@ -163,7 +163,11 @@ async def signup(request: Request, form_data: SignupForm):
)
hashed = get_password_hash(form_data.password)
user = Auths.insert_new_auth(
form_data.email.lower(), hashed, form_data.name, role
form_data.email.lower(),
hashed,
form_data.name,
form_data.profile_image_url,
role,
)
if user:

View file

@@ -251,6 +251,14 @@ async def delete_shared_chat_by_id(id: str, user=Depends(get_current_user)):
@router.get("/share/{share_id}", response_model=Optional[ChatResponse])
async def get_shared_chat_by_id(share_id: str, user=Depends(get_current_user)):
if user.role == "pending":
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
)
if user.role == "user":
chat = Chats.get_chat_by_share_id(share_id)
elif user.role == "admin":
chat = Chats.get_chat_by_id(share_id)
if chat:

View file

@@ -28,8 +28,6 @@ except ImportError:
WEBUI_NAME = os.environ.get("WEBUI_NAME", "Open WebUI")
WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
shutil.copyfile("../build/favicon.png", "./static/favicon.png")
####################################
# ENV (dev,test,prod)
####################################
@@ -103,6 +101,26 @@ for version in soup.find_all("h2"):
CHANGELOG = changelog_json
####################################
# DATA/FRONTEND BUILD DIR
####################################
DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))
try:
with open(f"{DATA_DIR}/config.json", "r") as f:
CONFIG_DATA = json.load(f)
except:
CONFIG_DATA = {}
####################################
# Static DIR
####################################
STATIC_DIR = str(Path(os.getenv("STATIC_DIR", "./static")).resolve())
shutil.copyfile(f"{FRONTEND_BUILD_DIR}/favicon.png", f"{STATIC_DIR}/favicon.png")
####################################
# LOGGING
@@ -165,7 +183,7 @@ if CUSTOM_NAME:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open("./static/favicon.png", "wb") as f:
with open(f"{STATIC_DIR}/favicon.png", "wb") as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
@@ -177,18 +195,6 @@ else:
if WEBUI_NAME != "Open WebUI":
WEBUI_NAME += " (Open WebUI)"
####################################
# DATA/FRONTEND BUILD DIR
####################################
DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))
try:
with open(f"{DATA_DIR}/config.json", "r") as f:
CONFIG_DATA = json.load(f)
except:
CONFIG_DATA = {}
####################################
# File Upload DIR
@@ -257,6 +263,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
K8S_FLAG = os.environ.get("K8S_FLAG", "")
USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")
if OLLAMA_BASE_URL == "" and OLLAMA_API_BASE_URL != "":
OLLAMA_BASE_URL = (
@@ -266,9 +273,13 @@ if OLLAMA_BASE_URL == "" and OLLAMA_API_BASE_URL != "":
)
if ENV == "prod":
if OLLAMA_BASE_URL == "/ollama":
if OLLAMA_BASE_URL == "/ollama" and not K8S_FLAG:
if USE_OLLAMA_DOCKER.lower() == "true":
# if you use all-in-one docker container (Open WebUI + Ollama)
# with the docker build arg USE_OLLAMA=true (--build-arg="USE_OLLAMA=true") this only works with http://localhost:11434
OLLAMA_BASE_URL = "http://localhost:11434"
else:
OLLAMA_BASE_URL = "http://host.docker.internal:11434"
elif K8S_FLAG:
OLLAMA_BASE_URL = "http://ollama-service.open-webui.svc.cluster.local:11434"
@@ -391,10 +402,22 @@ if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
# this uses the model defined in the Dockerfile ENV variable. If you don't use docker or docker based deployments such as k8s, the default embedding model will be used (all-MiniLM-L6-v2)
RAG_EMBEDDING_MODEL = os.environ.get("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
# device type embedding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
log.info(f"Embedding model set: {RAG_EMBEDDING_MODEL}"),
RAG_EMBEDDING_MODEL_DEVICE_TYPE = os.environ.get(
"RAG_EMBEDDING_MODEL_DEVICE_TYPE", "cpu"
RAG_EMBEDDING_MODEL_AUTO_UPDATE = (
os.environ.get("RAG_EMBEDDING_MODEL_AUTO_UPDATE", "").lower() == "true"
)
# device type embedding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")
if USE_CUDA.lower() == "true":
DEVICE_TYPE = "cuda"
else:
DEVICE_TYPE = "cpu"
CHROMA_CLIENT = chromadb.PersistentClient(
path=CHROMA_DATA_PATH,
settings=Settings(allow_reset=True, anonymized_telemetry=False),
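
Taken together, these config changes mean the embedding model, its auto-update behaviour, and the device type are now driven by environment variables rather than the old `RAG_EMBEDDING_MODEL_DEVICE_TYPE` setting. A hedged example of overriding them when starting the container (the model name and values are illustrative):

```bash
docker run -d -p 3000:8080 \
  -e RAG_EMBEDDING_MODEL="intfloat/multilingual-e5-base" \
  -e RAG_EMBEDDING_MODEL_AUTO_UPDATE=true \
  -e USE_CUDA_DOCKER=true \
  -v open-webui:/app/backend/data --name open-webui ghcr.io/open-webui/open-webui:cuda
```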

View file

@@ -5,6 +5,7 @@ import time
import os
import sys
import logging
import aiohttp
import requests
from fastapi import FastAPI, Request, Depends, status
@@ -18,6 +19,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
from apps.ollama.main import app as ollama_app
from apps.openai.main import app as openai_app
from apps.litellm.main import app as litellm_app, startup as litellm_app_startup
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
@@ -38,6 +40,8 @@ from config import (
VERSION,
CHANGELOG,
FRONTEND_BUILD_DIR,
CACHE_DIR,
STATIC_DIR,
MODEL_FILTER_ENABLED,
MODEL_FILTER_LIST,
GLOBAL_LOG_LEVEL,
@@ -269,14 +273,16 @@ async def get_app_changelog():
@app.get("/api/version/updates")
async def get_app_latest_release_version():
try:
response = requests.get(
async with aiohttp.ClientSession() as session:
f"https://api.github.com/repos/open-webui/open-webui/releases/latest"
async with session.get(
)
"https://api.github.com/repos/open-webui/open-webui/releases/latest"
) as response:
response.raise_for_status()
latest_version = response.json()["tag_name"]
data = await response.json()
latest_version = data["tag_name"]
return {"current": VERSION, "latest": latest_version[1:]}
except Exception as e:
except aiohttp.ClientError as e:
raise HTTPException(
status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
@@ -297,9 +303,8 @@ async def get_manifest_json():
}
app.mount("/static", StaticFiles(directory="static"), name="static")
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory="data/cache"), name="cache")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")
app.mount(
"/",

View file

@@ -7,16 +7,26 @@ KEY_FILE=.webui_secret_key
PORT="${PORT:-8080}"
if test "$WEBUI_SECRET_KEY $WEBUI_JWT_SECRET_KEY" = " "; then
echo No WEBUI_SECRET_KEY provided
echo "No WEBUI_SECRET_KEY provided"
if ! [ -e "$KEY_FILE" ]; then
echo Generating WEBUI_SECRET_KEY
echo "Generating WEBUI_SECRET_KEY"
# Generate a random value to use as a WEBUI_SECRET_KEY in case the user didn't provide one.
echo $(head -c 12 /dev/random | base64) > $KEY_FILE
echo $(head -c 12 /dev/random | base64) > "$KEY_FILE"
fi
echo Loading WEBUI_SECRET_KEY from $KEY_FILE
echo "Loading WEBUI_SECRET_KEY from $KEY_FILE"
WEBUI_SECRET_KEY=`cat $KEY_FILE`
WEBUI_SECRET_KEY=$(cat "$KEY_FILE")
fi
if [ "$USE_OLLAMA_DOCKER" = "true" ]; then
echo "USE_OLLAMA is set to true, starting ollama serve."
ollama serve &
fi
if [ "$USE_CUDA_DOCKER" = "true" ]; then
echo "CUDA is enabled, appending LD_LIBRARY_PATH to include torch/cudnn & cublas libraries."
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
fi
WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host 0.0.0.0 --port "$PORT" --forwarded-allow-ips '*'

View file

@ -0,0 +1,8 @@
services:
ollama:
devices:
- /dev/kfd:/dev/kfd
- /dev/dri:/dev/dri
image: ollama/ollama:${OLLAMA_DOCKER_TAG-rocm}
environment:
- 'HSA_OVERRIDE_GFX_VERSION=${HSA_OVERRIDE_GFX_VERSION-11.0.0}'
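
This new compose override maps the AMD GPU device nodes into the Ollama container and defaults its image tag to `rocm`. Assuming it is saved alongside the main compose file (the override filename here is an assumption), it would be combined like so:

```bash
# Combine the base compose file with the AMD GPU override (override filename assumed)
docker compose -f docker-compose.yaml -f docker-compose.amdgpu.yaml up -d
```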

View file

@@ -8,7 +8,7 @@ services:
pull_policy: always
tty: true
restart: unless-stopped
image: ollama/ollama:latest
image: ollama/ollama:${OLLAMA_DOCKER_TAG-latest}
open-webui:
build:
@@ -16,7 +16,7 @@ services:
args:
OLLAMA_BASE_URL: '/ollama'
dockerfile: Dockerfile
image: ghcr.io/open-webui/open-webui:main
image: ghcr.io/open-webui/open-webui:${WEBUI_DOCKER_TAG-main}
container_name: open-webui
volumes:
- open-webui:/app/backend/data
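
With the `${OLLAMA_DOCKER_TAG-latest}` and `${WEBUI_DOCKER_TAG-main}` substitutions above, the image tags can be switched per deployment via environment variables instead of editing the compose file, for example:

```bash
# Run the ROCm Ollama image and an explicit Open WebUI tag
OLLAMA_DOCKER_TAG=rocm WEBUI_DOCKER_TAG=main docker compose up -d
```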

View file

@@ -7,8 +7,12 @@ ollama
{{- end -}}
{{- define "ollama.url" -}}
{{- if .Values.ollama.externalHost }}
{{- printf .Values.ollama.externalHost }}
{{- else }}
{{- printf "http://%s.%s.svc.cluster.local:%d/" (include "ollama.name" .) (.Release.Namespace) (.Values.ollama.service.port | int) }}
{{- end }}
{{- end }}
{{- define "chart.name" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}

View file

@@ -1,3 +1,4 @@
{{- if not .Values.ollama.externalHost }}
apiVersion: v1
kind: Service
metadata:
@@ -19,3 +20,4 @@ spec:
port: {{ .port }}
targetPort: http
{{- end }}
{{- end }}

View file

@@ -1,3 +1,4 @@
{{- if not .Values.ollama.externalHost }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
@@ -94,3 +95,4 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
{{- end }}

View file

@@ -17,7 +17,9 @@ spec:
resources:
requests:
storage: {{ .Values.webui.persistence.size }}
{{- if .Values.webui.persistence.storageClass }}
storageClassName: {{ .Values.webui.persistence.storageClass }}
{{- end }}
{{- with .Values.webui.persistence.selector }}
selector:
{{- toYaml . | nindent 4 }}

View file

@@ -1,6 +1,7 @@
nameOverride: ""
ollama:
externalHost: ""
annotations: {}
podAnnotations: {}
replicaCount: 1

package-lock.json (generated)
View file

@@ -1,12 +1,12 @@
{
"name": "open-webui",
"version": "0.1.117",
"version": "0.1.118",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "open-webui",
"version": "0.1.117",
"version": "0.1.118",
"dependencies": {
"@sveltejs/adapter-node": "^1.3.1",
"async": "^3.2.5",
@@ -5688,9 +5688,9 @@
}
},
"node_modules/undici": {
"version": "5.28.3",
"version": "5.28.4",
"resolved": "https://registry.npmjs.org/undici/-/undici-5.28.3.tgz",
"resolved": "https://registry.npmjs.org/undici/-/undici-5.28.4.tgz",
"integrity": "sha512-3ItfzbrhDlINjaP0duwnNsKpDQk3acHI3gVJ1z4fmwMK31k5G9OVIAMLSIaP6w4FaGkaAkN6zaQO9LUvZ1t7VA==",
"integrity": "sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==",
"dependencies": {
"@fastify/busboy": "^2.0.0"
},

View file

@@ -1,6 +1,6 @@
{
"name": "open-webui",
"version": "0.1.117",
"version": "0.1.118",
"private": true,
"scripts": {
"dev": "vite dev --host",

View file

@@ -58,7 +58,12 @@ export const userSignIn = async (email: string, password: string) => {
return res;
};
export const userSignUp = async (name: string, email: string, password: string) => {
export const userSignUp = async (
name: string,
email: string,
password: string,
profile_image_url: string
) => {
let error = null;
const res = await fetch(`${WEBUI_API_BASE_URL}/auths/signup`, {
@@ -69,7 +74,8 @@ export const userSignUp = async (name: string, email: string, password: string)
body: JSON.stringify({
name: name,
email: email,
password: password
password: password,
profile_image_url: profile_image_url
})
})
.then(async (res) => {

View file

@@ -345,3 +345,64 @@ export const resetVectorDB = async (token: string) => {
return res;
};
export const getEmbeddingModel = async (token: string) => {
let error = null;
const res = await fetch(`${RAG_API_BASE_URL}/embedding/model`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = err.detail;
return null;
});
if (error) {
throw error;
}
return res;
};
type EmbeddingModelUpdateForm = {
embedding_model: string;
};
export const updateEmbeddingModel = async (token: string, payload: EmbeddingModelUpdateForm) => {
let error = null;
const res = await fetch(`${RAG_API_BASE_URL}/embedding/model/update`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`
},
body: JSON.stringify({
...payload
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = err.detail;
return null;
});
if (error) {
throw error;
}
return res;
};
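
The two new API helpers above wrap the backend's `GET /embedding/model` and `POST /embedding/model/update` RAG endpoints. Outside the frontend they can be exercised directly, for example with curl (the base path and token here are illustrative):

```bash
# Read the current embedding model
curl -s -H "Authorization: Bearer $TOKEN" http://localhost:3000/rag/api/v1/embedding/model

# Switch to a different sentence-transformers model
curl -s -X POST http://localhost:3000/rag/api/v1/embedding/model/update \
  -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"embedding_model": "intfloat/multilingual-e5-base"}'
```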

View file

@@ -295,6 +295,13 @@
const dropZone = document.querySelector('body');
const handleKeyDown = (event: KeyboardEvent) => {
if (event.key === 'Escape') {
console.log('Escape');
dragged = false;
}
};
const onDragOver = (e) => {
e.preventDefault();
dragged = true;
@@ -350,11 +357,15 @@
dragged = false;
};
window.addEventListener('keydown', handleKeyDown);
dropZone?.addEventListener('dragover', onDragOver);
dropZone?.addEventListener('drop', onDrop);
dropZone?.addEventListener('dragleave', onDragLeave);
return () => {
window.removeEventListener('keydown', handleKeyDown);
dropZone?.removeEventListener('dragover', onDragOver);
dropZone?.removeEventListener('drop', onDrop);
dropZone?.removeEventListener('dragleave', onDragLeave);

View file

@@ -107,12 +107,8 @@
await sendPrompt(userPrompt, userMessageId, chatId);
};
const confirmEditResponseMessage = async (messageId, content) => {
const updateChatMessages = async () => {
history.messages[messageId].originalContent = history.messages[messageId].content;
history.messages[messageId].content = content;
await tick();
await updateChatById(localStorage.token, chatId, {
messages: messages,
history: history
@@ -121,15 +117,20 @@
await chats.set(await getChatList(localStorage.token));
};
const rateMessage = async (messageId, rating) => {
const confirmEditResponseMessage = async (messageId, content) => {
history.messages[messageId].rating = rating;
history.messages[messageId].originalContent = history.messages[messageId].content;
await tick();
history.messages[messageId].content = content;
await updateChatById(localStorage.token, chatId, {
messages: messages,
history: history
});
await chats.set(await getChatList(localStorage.token));
await updateChatMessages();
};
const rateMessage = async (messageId, rating) => {
history.messages[messageId].annotation = {
...history.messages[messageId].annotation,
rating: rating
};
await updateChatMessages();
};
const showPreviousMessage = async (message) => {
@@ -338,6 +339,7 @@
siblings={history.messages[message.parentId]?.childrenIds ?? []}
isLastMessage={messageIdx + 1 === messages.length}
{readOnly}
{updateChatMessages}
{confirmEditResponseMessage}
{showPreviousMessage}
{showNextMessage}

View file

@ -0,0 +1,117 @@
<script lang="ts">
import { toast } from 'svelte-sonner';
import { createEventDispatcher, onMount } from 'svelte';
const dispatch = createEventDispatcher();
export let show = false;
export let message;
const LIKE_REASONS = [
`Accurate information`,
`Followed instructions perfectly`,
`Showcased creativity`,
`Positive attitude`,
`Attention to detail`,
`Thorough explanation`,
`Other`
];
const DISLIKE_REASONS = [
`Don't like the style`,
`Not factually correct`,
`Didn't fully follow instructions`,
`Refused when it shouldn't have`,
`Being Lazy`,
`Other`
];
let reasons = [];
let selectedReason = null;
let comment = '';
$: if (message.annotation.rating === 1) {
reasons = LIKE_REASONS;
} else if (message.annotation.rating === -1) {
reasons = DISLIKE_REASONS;
}
onMount(() => {
selectedReason = message.annotation.reason;
comment = message.annotation.comment;
});
const submitHandler = () => {
console.log('submitHandler');
message.annotation.reason = selectedReason;
message.annotation.comment = comment;
dispatch('submit');
toast.success('Thanks for your feedback!');
show = false;
};
</script>
<div class=" my-2.5 rounded-xl px-4 py-3 border dark:border-gray-850">
<div class="flex justify-between items-center">
<div class=" text-sm">Tell us more:</div>
<button
on:click={() => {
show = false;
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="size-4"
>
<path stroke-linecap="round" stroke-linejoin="round" d="M6 18 18 6M6 6l12 12" />
</svg>
</button>
</div>
{#if reasons.length > 0}
<div class="flex flex-wrap gap-2 text-sm mt-2.5">
{#each reasons as reason}
<button
class="px-3.5 py-1 border dark:border-gray-850 dark:hover:bg-gray-850 {selectedReason ===
reason
? 'dark:bg-gray-800'
: ''} transition rounded-lg"
on:click={() => {
selectedReason = reason;
}}
>
{reason}
</button>
{/each}
</div>
{/if}
<div class="mt-2">
<textarea
bind:value={comment}
class="w-full text-sm px-1 py-2 bg-transparent outline-none resize-none rounded-xl"
placeholder="Feel free to add specific details"
rows="2"
/>
</div>
<div class="mt-2 flex justify-end">
<button
class=" bg-emerald-700 text-white text-sm font-medium rounded-lg px-3.5 py-1.5"
on:click={() => {
submitHandler();
}}
>
Submit
</button>
</div>
</div>

View file

@@ -30,6 +30,7 @@
import Image from '$lib/components/common/Image.svelte';
import { WEBUI_BASE_URL } from '$lib/constants';
import Tooltip from '$lib/components/common/Tooltip.svelte';
import RateComment from './RateComment.svelte';
export let modelfiles = [];
export let message;
@@ -39,6 +40,7 @@
export let readOnly = false;
export let updateChatMessages: Function;
export let confirmEditResponseMessage: Function;
export let showPreviousMessage: Function;
export let showNextMessage: Function;
@@ -60,6 +62,8 @@
let loadingSpeech = false;
let generatingImage = false;
let showRateComment = false;
$: tokens = marked.lexer(sanitizeResponseContent(message.content));
const renderer = new marked.Renderer();
@@ -536,11 +540,13 @@
<button
class="{isLastMessage
? 'visible'
: 'invisible group-hover:visible'} p-1 rounded {message.rating === 1
: 'invisible group-hover:visible'} p-1 rounded {message?.annotation
?.rating === 1
? 'bg-gray-100 dark:bg-gray-800'
: ''} dark:hover:text-white hover:text-black transition"
on:click={() => {
rateMessage(message.id, 1);
showRateComment = true;
}}
>
<svg
@@ -563,11 +569,13 @@
<button
class="{isLastMessage
? 'visible'
: 'invisible group-hover:visible'} p-1 rounded {message.rating === -1
: 'invisible group-hover:visible'} p-1 rounded {message?.annotation
?.rating === -1
? 'bg-gray-100 dark:bg-gray-800'
: ''} dark:hover:text-white hover:text-black transition"
on:click={() => {
rateMessage(message.id, -1);
showRateComment = true;
}}
>
<svg
@@ -824,6 +832,16 @@
{/if}
</div>
{/if}
{#if showRateComment}
<RateComment
bind:show={showRateComment}
bind:message
on:submit={() => {
updateChatMessages();
}}
/>
{/if}
</div> </div>
{/if} {/if}
</div> </div>
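Net effect of this hunk: the thumbs-up/down buttons now read their highlight state from message?.annotation?.rating instead of message.rating, and each click both rates the message and opens the RateComment panel, whose submit event calls updateChatMessages() to persist the annotation. rateMessage itself is not shown in this excerpt; a purely illustrative, hypothetical shape consistent with the fields used above:

// Hypothetical sketch only - the real rateMessage lives elsewhere in this component.
const rateMessage = (messageId: string, rating: number) => {
	if (message.id !== messageId) return;
	message.annotation = { ...(message.annotation ?? {}), rating };
	// persistence happens later, when RateComment dispatches 'submit' and
	// updateChatMessages() runs.
};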

View file

@ -7,6 +7,7 @@
import UpdatePassword from './Account/UpdatePassword.svelte'; import UpdatePassword from './Account/UpdatePassword.svelte';
import { getGravatarUrl } from '$lib/apis/utils'; import { getGravatarUrl } from '$lib/apis/utils';
import { generateInitialsImage, canvasPixelTest } from '$lib/utils';
import { copyToClipboard } from '$lib/utils'; import { copyToClipboard } from '$lib/utils';
import Plus from '$lib/components/icons/Plus.svelte'; import Plus from '$lib/components/icons/Plus.svelte';
import Tooltip from '$lib/components/common/Tooltip.svelte'; import Tooltip from '$lib/components/common/Tooltip.svelte';
@ -18,6 +19,8 @@
let profileImageUrl = ''; let profileImageUrl = '';
let name = ''; let name = '';
let showAPIKeys = false;
let showJWTToken = false; let showJWTToken = false;
let JWTTokenCopied = false; let JWTTokenCopied = false;
@ -28,6 +31,12 @@
let profileImageInputElement: HTMLInputElement; let profileImageInputElement: HTMLInputElement;
const submitHandler = async () => { const submitHandler = async () => {
if (name !== $user.name) {
if (profileImageUrl === generateInitialsImage($user.name) || profileImageUrl === '') {
profileImageUrl = generateInitialsImage(name);
}
}
const updatedUser = await updateUserProfile(localStorage.token, name, profileImageUrl).catch( const updatedUser = await updateUserProfile(localStorage.token, name, profileImageUrl).catch(
(error) => { (error) => {
toast.error(error); toast.error(error);
@ -125,11 +134,12 @@
}} }}
/> />
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Profile')}</div> <div class="space-y-1">
<!-- <div class=" text-sm font-medium">{$i18n.t('Account')}</div> -->
<div class="flex space-x-5"> <div class="flex space-x-5">
<div class="flex flex-col"> <div class="flex flex-col">
<div class="self-center"> <div class="self-center mt-2">
<button <button
class="relative rounded-full dark:bg-gray-700" class="relative rounded-full dark:bg-gray-700"
type="button" type="button"
@ -138,9 +148,9 @@
}} }}
> >
<img <img
src={profileImageUrl !== '' ? profileImageUrl : '/user.png'} src={profileImageUrl !== '' ? profileImageUrl : generateInitialsImage(name)}
alt="profile" alt="profile"
class=" rounded-full w-16 h-16 object-cover" class=" rounded-full size-16 object-cover"
/> />
<div <div
@ -161,23 +171,56 @@
</div> </div>
</button> </button>
</div> </div>
</div>
<div class="flex-1 flex flex-col self-center gap-0.5">
<div class=" mb-0.5 text-sm font-medium">{$i18n.t('Profile Image')}</div>
<div>
<button <button
class=" text-xs text-gray-600" class=" text-xs text-center text-gray-800 dark:text-gray-400 rounded-full px-4 py-0.5 bg-gray-100 dark:bg-gray-850"
on:click={async () => {
if (canvasPixelTest()) {
profileImageUrl = generateInitialsImage(name);
} else {
toast.info(
$i18n.t(
'Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.'
),
{
duration: 1000 * 10
}
);
}
}}>{$i18n.t('Use Initials')}</button
>
<button
class=" text-xs text-center text-gray-800 dark:text-gray-400 rounded-full px-4 py-0.5 bg-gray-100 dark:bg-gray-850"
on:click={async () => { on:click={async () => {
const url = await getGravatarUrl($user.email); const url = await getGravatarUrl($user.email);
profileImageUrl = url; profileImageUrl = url;
}}>{$i18n.t('Use Gravatar')}</button }}>{$i18n.t('Use Gravatar')}</button
> >
<button
class=" text-xs text-center text-gray-800 dark:text-gray-400 rounded-lg px-2 py-1"
on:click={async () => {
profileImageUrl = '/user.png';
}}>{$i18n.t('Remove')}</button
>
</div>
</div>
</div> </div>
<div class="flex-1"> <div class="pt-0.5">
<div class="flex flex-col w-full"> <div class="flex flex-col w-full">
<div class=" mb-1 text-xs text-gray-500">{$i18n.t('Name')}</div> <div class=" mb-1 text-xs font-medium">{$i18n.t('Name')}</div>
<div class="flex-1"> <div class="flex-1">
<input <input
class="w-full rounded py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none" class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
type="text" type="text"
bind:value={name} bind:value={name}
required required
@ -187,11 +230,24 @@
</div> </div>
</div> </div>
<hr class=" dark:border-gray-700 my-4" /> <div class="py-0.5">
<UpdatePassword /> <UpdatePassword />
</div>
<hr class=" dark:border-gray-700 my-4" /> <hr class=" dark:border-gray-700 my-4" />
<div class="flex justify-between items-center text-sm">
<div class=" font-medium">{$i18n.t('API keys')}</div>
<button
class=" text-xs font-medium text-gray-500"
type="button"
on:click={() => {
showAPIKeys = !showAPIKeys;
}}>{showAPIKeys ? $i18n.t('Hide') : $i18n.t('Show')}</button
>
</div>
{#if showAPIKeys}
<div class="flex flex-col gap-4"> <div class="flex flex-col gap-4">
<div class="justify-between w-full"> <div class="justify-between w-full">
<div class="flex justify-between w-full"> <div class="flex justify-between w-full">
@ -201,14 +257,14 @@
<div class="flex mt-2"> <div class="flex mt-2">
<div class="flex w-full"> <div class="flex w-full">
<input <input
class="w-full rounded-l-lg py-1.5 pl-4 text-sm bg-white dark:text-gray-300 dark:bg-gray-800 outline-none" class="w-full rounded-l-lg py-1.5 pl-4 text-sm bg-white dark:text-gray-300 dark:bg-gray-850 outline-none"
type={showJWTToken ? 'text' : 'password'} type={showJWTToken ? 'text' : 'password'}
value={localStorage.token} value={localStorage.token}
disabled disabled
/> />
<button <button
class="px-2 transition rounded-r-lg bg-white dark:bg-gray-800" class="px-2 transition rounded-r-lg bg-white dark:bg-gray-850"
on:click={() => { on:click={() => {
showJWTToken = !showJWTToken; showJWTToken = !showJWTToken;
}} }}
@ -248,7 +304,7 @@
</div> </div>
<button <button
class="ml-1.5 px-1.5 py-1 dark:hover:bg-gray-800 transition rounded-lg" class="ml-1.5 px-1.5 py-1 dark:hover:bg-gray-850 transition rounded-lg"
on:click={() => { on:click={() => {
copyToClipboard(localStorage.token); copyToClipboard(localStorage.token);
JWTTokenCopied = true; JWTTokenCopied = true;
@ -301,14 +357,14 @@
{#if APIKey} {#if APIKey}
<div class="flex w-full"> <div class="flex w-full">
<input <input
class="w-full rounded-l-lg py-1.5 pl-4 text-sm bg-white dark:text-gray-300 dark:bg-gray-800 outline-none" class="w-full rounded-l-lg py-1.5 pl-4 text-sm bg-white dark:text-gray-300 dark:bg-gray-850 outline-none"
type={showAPIKey ? 'text' : 'password'} type={showAPIKey ? 'text' : 'password'}
value={APIKey} value={APIKey}
disabled disabled
/> />
<button <button
class="px-2 transition rounded-r-lg bg-white dark:bg-gray-800" class="px-2 transition rounded-r-lg bg-white dark:bg-gray-850"
on:click={() => { on:click={() => {
showAPIKey = !showAPIKey; showAPIKey = !showAPIKey;
}} }}
@ -348,7 +404,7 @@
</div> </div>
<button <button
class="ml-1.5 px-1.5 py-1 dark:hover:bg-gray-800 transition rounded-lg" class="ml-1.5 px-1.5 py-1 dark:hover:bg-gray-850 transition rounded-lg"
on:click={() => { on:click={() => {
copyToClipboard(APIKey); copyToClipboard(APIKey);
APIKeyCopied = true; APIKeyCopied = true;
@ -393,7 +449,7 @@
<Tooltip content="Create new key"> <Tooltip content="Create new key">
<button <button
class=" px-1.5 py-1 dark:hover:bg-gray-800transition rounded-lg" class=" px-1.5 py-1 dark:hover:bg-gray-850transition rounded-lg"
on:click={() => { on:click={() => {
createAPIKeyHandler(); createAPIKeyHandler();
}} }}
@ -416,7 +472,7 @@
</Tooltip> </Tooltip>
{:else} {:else}
<button <button
class="flex gap-1.5 items-center font-medium px-3.5 py-1.5 rounded-lg bg-gray-100/70 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition" class="flex gap-1.5 items-center font-medium px-3.5 py-1.5 rounded-lg bg-gray-100/70 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-850 transition"
on:click={() => { on:click={() => {
createAPIKeyHandler(); createAPIKeyHandler();
}} }}
@ -429,6 +485,7 @@
</div> </div>
</div> </div>
</div> </div>
{/if}
</div> </div>
<div class="flex justify-end pt-3 text-sm font-medium"> <div class="flex justify-end pt-3 text-sm font-medium">

View file

@ -185,7 +185,7 @@
<div> <div>
<div class=" py-0.5 flex w-full justify-between"> <div class=" py-0.5 flex w-full justify-between">
<div class=" self-center text-xs font-medium">{$i18n.t('Desktop Notifications')}</div> <div class=" self-center text-xs font-medium">{$i18n.t('Notifications')}</div>
<button <button
class="p-1 px-3 text-xs flex rounded transition" class="p-1 px-3 text-xs flex rounded transition"

View file

@ -7,6 +7,7 @@
export let show = true; export let show = true;
export let size = 'md'; export let size = 'md';
let modalElement = null;
let mounted = false; let mounted = false;
const sizeToWidth = (size) => { const sizeToWidth = (size) => {
@ -19,14 +20,23 @@
} }
}; };
const handleKeyDown = (event: KeyboardEvent) => {
if (event.key === 'Escape') {
console.log('Escape');
show = false;
}
};
onMount(() => { onMount(() => {
mounted = true; mounted = true;
}); });
$: if (mounted) { $: if (mounted) {
if (show) { if (show) {
window.addEventListener('keydown', handleKeyDown);
document.body.style.overflow = 'hidden'; document.body.style.overflow = 'hidden';
} else { } else {
window.removeEventListener('keydown', handleKeyDown);
document.body.style.overflow = 'unset'; document.body.style.overflow = 'unset';
} }
} }
@ -36,6 +46,7 @@
<!-- svelte-ignore a11y-click-events-have-key-events --> <!-- svelte-ignore a11y-click-events-have-key-events -->
<!-- svelte-ignore a11y-no-static-element-interactions --> <!-- svelte-ignore a11y-no-static-element-interactions -->
<div <div
bind:this={modalElement}
class=" fixed top-0 right-0 left-0 bottom-0 bg-black/60 w-full min-h-screen h-screen flex justify-center z-50 overflow-hidden overscroll-contain" class=" fixed top-0 right-0 left-0 bottom-0 bg-black/60 w-full min-h-screen h-screen flex justify-center z-50 overflow-hidden overscroll-contain"
in:fade={{ duration: 10 }} in:fade={{ duration: 10 }}
on:click={() => { on:click={() => {
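The new Escape handling attaches a window keydown listener while the modal is shown and detaches it when show flips back to false. One case the reactive block does not cover is the component being destroyed while still open; a hedged sketch of an extra cleanup (my suggestion, not part of this diff):

import { onDestroy } from 'svelte';

// Assumes handleKeyDown is the same function added above.
onDestroy(() => {
	window.removeEventListener('keydown', handleKeyDown);
	document.body.style.overflow = 'unset';
});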

View file

@ -6,18 +6,23 @@
getQuerySettings, getQuerySettings,
scanDocs, scanDocs,
updateQuerySettings, updateQuerySettings,
resetVectorDB resetVectorDB,
getEmbeddingModel,
updateEmbeddingModel
} from '$lib/apis/rag'; } from '$lib/apis/rag';
import { documents } from '$lib/stores'; import { documents } from '$lib/stores';
import { onMount, getContext } from 'svelte'; import { onMount, getContext } from 'svelte';
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import Tooltip from '$lib/components/common/Tooltip.svelte';
const i18n = getContext('i18n'); const i18n = getContext('i18n');
export let saveHandler: Function; export let saveHandler: Function;
let loading = false; let scanDirLoading = false;
let updateEmbeddingModelLoading = false;
let showResetConfirm = false; let showResetConfirm = false;
@ -30,10 +35,12 @@
k: 4 k: 4
}; };
let embeddingModel = '';
const scanHandler = async () => { const scanHandler = async () => {
loading = true; scanDirLoading = true;
const res = await scanDocs(localStorage.token); const res = await scanDocs(localStorage.token);
loading = false; scanDirLoading = false;
if (res) { if (res) {
await documents.set(await getDocs(localStorage.token)); await documents.set(await getDocs(localStorage.token));
@ -41,6 +48,38 @@
} }
}; };
const embeddingModelUpdateHandler = async () => {
if (embeddingModel.split('/').length - 1 > 1) {
toast.error(
$i18n.t(
'Model filesystem path detected. Model shortname is required for update, cannot continue.'
)
);
return;
}
console.log('Update embedding model attempt:', embeddingModel);
updateEmbeddingModelLoading = true;
const res = await updateEmbeddingModel(localStorage.token, {
embedding_model: embeddingModel
}).catch(async (error) => {
toast.error(error);
embeddingModel = (await getEmbeddingModel(localStorage.token)).embedding_model;
return null;
});
updateEmbeddingModelLoading = false;
if (res) {
console.log('embeddingModelUpdateHandler:', res);
if (res.status === true) {
toast.success($i18n.t('Model {{embedding_model}} update complete!', res), {
duration: 1000 * 10
});
}
}
};
const submitHandler = async () => { const submitHandler = async () => {
const res = await updateRAGConfig(localStorage.token, { const res = await updateRAGConfig(localStorage.token, {
pdf_extract_images: pdfExtractImages, pdf_extract_images: pdfExtractImages,
@ -62,6 +101,8 @@
chunkOverlap = res.chunk.chunk_overlap; chunkOverlap = res.chunk.chunk_overlap;
} }
embeddingModel = (await getEmbeddingModel(localStorage.token)).embedding_model;
querySettings = await getQuerySettings(localStorage.token); querySettings = await getQuerySettings(localStorage.token);
}); });
</script> </script>
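The guard at the top of embeddingModelUpdateHandler counts '/' characters, so a registry-style shortname passes while a filesystem path is rejected before any request is made. A couple of illustrative inputs (mine, not from the diff):

'sentence-transformers/all-MiniLM-L6-v2'.split('/').length - 1; // 1 -> allowed
'/data/models/all-MiniLM-L6-v2'.split('/').length - 1;          // 3 -> rejected with the toast above

On success the handler expects res.status === true from updateEmbeddingModel; on failure it re-fetches the currently configured model via getEmbeddingModel so the input reflects the server state again.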
@ -73,7 +114,7 @@
saveHandler(); saveHandler();
}} }}
> >
<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80"> <div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[22rem]">
<div> <div>
<div class=" mb-2 text-sm font-medium">{$i18n.t('General Settings')}</div> <div class=" mb-2 text-sm font-medium">{$i18n.t('General Settings')}</div>
@ -83,7 +124,7 @@
</div> </div>
<button <button
class=" self-center text-xs p-1 px-3 bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 rounded flex flex-row space-x-1 items-center {loading class=" self-center text-xs p-1 px-3 bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 rounded-lg flex flex-row space-x-1 items-center {scanDirLoading
? ' cursor-not-allowed' ? ' cursor-not-allowed'
: ''}" : ''}"
on:click={() => { on:click={() => {
@ -91,24 +132,11 @@
console.log('check'); console.log('check');
}} }}
type="button" type="button"
disabled={loading} disabled={scanDirLoading}
> >
<div class="self-center font-medium">{$i18n.t('Scan')}</div> <div class="self-center font-medium">{$i18n.t('Scan')}</div>
<!-- <svg {#if scanDirLoading}
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-3 h-3"
>
<path
fill-rule="evenodd"
d="M13.836 2.477a.75.75 0 0 1 .75.75v3.182a.75.75 0 0 1-.75.75h-3.182a.75.75 0 0 1 0-1.5h1.37l-.84-.841a4.5 4.5 0 0 0-7.08.932.75.75 0 0 1-1.3-.75 6 6 0 0 1 9.44-1.242l.842.84V3.227a.75.75 0 0 1 .75-.75Zm-.911 7.5A.75.75 0 0 1 13.199 11a6 6 0 0 1-9.44 1.241l-.84-.84v1.371a.75.75 0 0 1-1.5 0V9.591a.75.75 0 0 1 .75-.75H5.35a.75.75 0 0 1 0 1.5H3.98l.841.841a4.5 4.5 0 0 0 7.08-.932.75.75 0 0 1 1.025-.273Z"
clip-rule="evenodd"
/>
</svg> -->
{#if loading}
<div class="ml-3 self-center"> <div class="ml-3 self-center">
<svg <svg
class=" w-3 h-3" class=" w-3 h-3"
@ -141,6 +169,78 @@
<hr class=" dark:border-gray-700" /> <hr class=" dark:border-gray-700" />
<div class="space-y-2">
<div>
<div class=" mb-2 text-sm font-medium">{$i18n.t('Update Embedding Model')}</div>
<div class="flex w-full">
<div class="flex-1 mr-2">
<input
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder={$i18n.t('Update embedding model (e.g. {{model}})', {
model: embeddingModel.slice(-40)
})}
bind:value={embeddingModel}
/>
</div>
<button
class="px-2.5 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded-lg transition"
on:click={() => {
embeddingModelUpdateHandler();
}}
disabled={updateEmbeddingModelLoading}
>
{#if updateEmbeddingModelLoading}
<div class="self-center">
<svg
class=" w-4 h-4"
viewBox="0 0 24 24"
fill="currentColor"
xmlns="http://www.w3.org/2000/svg"
><style>
.spinner_ajPY {
transform-origin: center;
animation: spinner_AtaB 0.75s infinite linear;
}
@keyframes spinner_AtaB {
100% {
transform: rotate(360deg);
}
}
</style><path
d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
opacity=".25"
/><path
d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
class="spinner_ajPY"
/></svg
>
</div>
{:else}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M8.75 2.75a.75.75 0 0 0-1.5 0v5.69L5.03 6.22a.75.75 0 0 0-1.06 1.06l3.5 3.5a.75.75 0 0 0 1.06 0l3.5-3.5a.75.75 0 0 0-1.06-1.06L8.75 8.44V2.75Z"
/>
<path
d="M3.5 9.75a.75.75 0 0 0-1.5 0v1.5A2.75 2.75 0 0 0 4.75 14h6.5A2.75 2.75 0 0 0 14 11.25v-1.5a.75.75 0 0 0-1.5 0v1.5c0 .69-.56 1.25-1.25 1.25h-6.5c-.69 0-1.25-.56-1.25-1.25v-1.5Z"
/>
</svg>
{/if}
</button>
</div>
<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t(
'Warning: If you update or change your embedding model, you will need to re-import all documents.'
)}
</div>
<hr class=" dark:border-gray-700 my-3" />
<div class=" "> <div class=" ">
<div class=" text-sm font-medium">{$i18n.t('Chunk Params')}</div> <div class=" text-sm font-medium">{$i18n.t('Chunk Params')}</div>
@ -150,7 +250,7 @@
<div class="self-center p-3"> <div class="self-center p-3">
<input <input
class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600" class=" w-full rounded-lg py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number" type="number"
placeholder={$i18n.t('Enter Chunk Size')} placeholder={$i18n.t('Enter Chunk Size')}
bind:value={chunkSize} bind:value={chunkSize}
@ -161,11 +261,13 @@
</div> </div>
<div class="flex w-full"> <div class="flex w-full">
<div class=" self-center text-xs font-medium min-w-fit">{$i18n.t('Chunk Overlap')}</div> <div class=" self-center text-xs font-medium min-w-fit">
{$i18n.t('Chunk Overlap')}
</div>
<div class="self-center p-3"> <div class="self-center p-3">
<input <input
class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600" class="w-full rounded-lg py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number" type="number"
placeholder={$i18n.t('Enter Chunk Overlap')} placeholder={$i18n.t('Enter Chunk Overlap')}
bind:value={chunkOverlap} bind:value={chunkOverlap}
@ -176,7 +278,7 @@
</div> </div>
</div> </div>
<div> <div class="pr-2">
<div class="flex justify-between items-center text-xs"> <div class="flex justify-between items-center text-xs">
<div class=" text-xs font-medium">{$i18n.t('PDF Extract Images (OCR)')}</div> <div class=" text-xs font-medium">{$i18n.t('PDF Extract Images (OCR)')}</div>
@ -191,6 +293,8 @@
</div> </div>
</div> </div>
<hr class=" dark:border-gray-700 my-3" />
<div> <div>
<div class=" text-sm font-medium">{$i18n.t('Query Params')}</div> <div class=" text-sm font-medium">{$i18n.t('Query Params')}</div>
@ -200,7 +304,7 @@
<div class="self-center p-3"> <div class="self-center p-3">
<input <input
class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600" class=" w-full rounded-lg py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number" type="number"
placeholder={$i18n.t('Enter Top K')} placeholder={$i18n.t('Enter Top K')}
bind:value={querySettings.k} bind:value={querySettings.k}
@ -209,34 +313,19 @@
/> />
</div> </div>
</div> </div>
<!-- <div class="flex w-full">
<div class=" self-center text-xs font-medium min-w-fit">Chunk Overlap</div>
<div class="self-center p-3">
<input
class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
type="number"
placeholder="Enter Chunk Overlap"
bind:value={chunkOverlap}
autocomplete="off"
min="0"
/>
</div>
</div> -->
</div> </div>
<div> <div>
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('RAG Template')}</div> <div class=" mb-2.5 text-sm font-medium">{$i18n.t('RAG Template')}</div>
<textarea <textarea
bind:value={querySettings.template} bind:value={querySettings.template}
class="w-full rounded p-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none resize-none" class="w-full rounded-lg px-4 py-3 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
rows="4" rows="4"
/> />
</div> </div>
</div> </div>
<hr class=" dark:border-gray-700" /> <hr class=" dark:border-gray-700 my-3" />
{#if showResetConfirm} {#if showResetConfirm}
<div class="flex justify-between rounded-md items-center py-2 px-3.5 w-full transition"> <div class="flex justify-between rounded-md items-center py-2 px-3.5 w-full transition">
@ -330,7 +419,8 @@
</button> </button>
{/if} {/if}
</div> </div>
</div>
</div>
<div class="flex justify-end pt-3 text-sm font-medium"> <div class="flex justify-end pt-3 text-sm font-medium">
<button <button
class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded" class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "Изтрито {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "Изтрито {{deleteModelTag}}",
"Deleted {tagName}": "Изтрито {tagName}", "Deleted {tagName}": "Изтрито {tagName}",
"Description": "Описание", "Description": "Описание",
"Desktop Notifications": "Десктоп Известия", "Notifications": "Десктоп Известия",
"Disabled": "Деактивиран", "Disabled": "Деактивиран",
"Discover a modelfile": "Откриване на модфайл", "Discover a modelfile": "Откриване на модфайл",
"Discover a prompt": "Откриване на промпт", "Discover a prompt": "Откриване на промпт",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "Esborrat {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "Esborrat {{deleteModelTag}}",
"Deleted {tagName}": "Esborrat {tagName}", "Deleted {tagName}": "Esborrat {tagName}",
"Description": "Descripció", "Description": "Descripció",
"Desktop Notifications": "Notificacions d'Escriptori", "Notifications": "Notificacions d'Escriptori",
"Disabled": "Desactivat", "Disabled": "Desactivat",
"Discover a modelfile": "Descobreix un fitxer de model", "Discover a modelfile": "Descobreix un fitxer de model",
"Discover a prompt": "Descobreix un prompt", "Discover a prompt": "Descobreix un prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} gelöscht", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} gelöscht",
"Deleted {tagName}": "{tagName} gelöscht", "Deleted {tagName}": "{tagName} gelöscht",
"Description": "Beschreibung", "Description": "Beschreibung",
"Desktop Notifications": "Desktop-Benachrichtigungen", "Notifications": "Desktop-Benachrichtigungen",
"Disabled": "Deaktiviert", "Disabled": "Deaktiviert",
"Discover a modelfile": "Eine Modelfiles entdecken", "Discover a modelfile": "Eine Modelfiles entdecken",
"Discover a prompt": "Einen Prompt entdecken", "Discover a prompt": "Einen Prompt entdecken",

View file

@ -0,0 +1,64 @@
{
"analyze": "analyse",
"analyzed": "analysed",
"analyzes": "analyses",
"apologize": "apologise",
"apologized": "apologised",
"apologizes": "apologises",
"apologizing": "apologising",
"canceled": "cancelled",
"canceling": "cancelling",
"capitalize": "capitalise",
"capitalized": "capitalised",
"capitalizes": "capitalises",
"center": "centre",
"centered": "centred",
"color": "colour",
"colorize": "colourise",
"customize": "customise",
"customizes": "customises",
"defense": "defence",
"dialog": "dialogue",
"emphasize": "emphasise",
"emphasized": "emphasised",
"emphasizes": "emphasises",
"favor": "favour",
"favorable": "favourable",
"favorite": "favourite",
"favoritism": "favouritism",
"labor": "labour",
"labored": "laboured",
"laboring": "labouring",
"maximize": "maximise",
"maximizes": "maximises",
"minimize": "minimise",
"minimizes": "minimises",
"neighbor": "neighbour",
"neighborhood": "neighbourhood",
"offense": "offence",
"organize": "organise",
"organizes": "organises",
"personalize": "personalise",
"personalizes": "personalises",
"program": "programme",
"programmed": "programmed",
"programs": "programmes",
"quantization": "quantisation",
"quantize": "quantise",
"randomize": "randomise",
"randomizes": "randomises",
"realize": "realise",
"realizes": "realises",
"recognize": "recognise",
"recognizes": "recognises",
"summarize": "summarise",
"summarizes": "summarises",
"theater": "theatre",
"theaters": "theatres",
"toward": "towards",
"traveled": "travelled",
"traveler": "traveller",
"traveling": "travelling",
"utilize": "utilise",
"utilizes": "utilises"
}
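This new JSON file reads as a US-to-British spelling map, presumably used to derive the en-GB locale added to languages.json below from the en-US strings. How it is consumed is not shown in this diff; one way such a map could be applied (a hedged sketch with a hypothetical import path):

import spellings from './en-GB/spelling.json'; // hypothetical path

// Replace whole-word US spellings with their British counterparts.
const toBritish = (text: string): string =>
	Object.entries(spellings as Record<string, string>).reduce(
		(acc, [us, gb]) => acc.replace(new RegExp(`\\b${us}\\b`, 'g'), gb),
		text
	);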

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "", "Deleted {{deleteModelTag}}": "",
"Deleted {tagName}": "", "Deleted {tagName}": "",
"Description": "", "Description": "",
"Desktop Notifications": "", "Notifications": "",
"Disabled": "", "Disabled": "",
"Discover a modelfile": "", "Discover a modelfile": "",
"Discover a prompt": "", "Discover a prompt": "",
@ -120,6 +120,7 @@
"Edit Doc": "", "Edit Doc": "",
"Edit User": "", "Edit User": "",
"Email": "", "Email": "",
"Embedding model: {{embedding_model}}": "",
"Enable Chat History": "", "Enable Chat History": "",
"Enable New Sign Ups": "", "Enable New Sign Ups": "",
"Enabled": "", "Enabled": "",
@ -150,6 +151,7 @@
"Failed to read clipboard contents": "", "Failed to read clipboard contents": "",
"File Mode": "", "File Mode": "",
"File not found.": "", "File not found.": "",
"Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "",
"Focus chat input": "", "Focus chat input": "",
"Format your variables using square brackets like this:": "", "Format your variables using square brackets like this:": "",
"From (Base Model)": "", "From (Base Model)": "",
@ -193,8 +195,11 @@
"MMMM DD, YYYY": "", "MMMM DD, YYYY": "",
"Model '{{modelName}}' has been successfully downloaded.": "", "Model '{{modelName}}' has been successfully downloaded.": "",
"Model '{{modelTag}}' is already in queue for downloading.": "", "Model '{{modelTag}}' is already in queue for downloading.": "",
"Model {{embedding_model}} update complete!": "",
"Model {{embedding_model}} update failed or not required!": "",
"Model {{modelId}} not found": "", "Model {{modelId}} not found": "",
"Model {{modelName}} already exists.": "", "Model {{modelName}} already exists.": "",
"Model filesystem path detected. Model shortname is required for update, cannot continue.": "",
"Model Name": "", "Model Name": "",
"Model not selected": "", "Model not selected": "",
"Model Tag Name": "", "Model Tag Name": "",
@ -332,7 +337,10 @@
"TTS Settings": "", "TTS Settings": "",
"Type Hugging Face Resolve (Download) URL": "", "Type Hugging Face Resolve (Download) URL": "",
"Uh-oh! There was an issue connecting to {{provider}}.": "", "Uh-oh! There was an issue connecting to {{provider}}.": "",
"Understand that updating or changing your embedding model requires reset of the vector database and re-import of all documents. You have been warned!": "",
"Unknown File Type '{{file_type}}', but accepting and treating as plain text": "", "Unknown File Type '{{file_type}}', but accepting and treating as plain text": "",
"Update": "",
"Update embedding model {{embedding_model}}": "",
"Update password": "", "Update password": "",
"Upload a GGUF model": "", "Upload a GGUF model": "",
"Upload files": "", "Upload files": "",
@ -340,6 +348,7 @@
"URL Mode": "", "URL Mode": "",
"Use '#' in the prompt input to load and select your documents.": "", "Use '#' in the prompt input to load and select your documents.": "",
"Use Gravatar": "", "Use Gravatar": "",
"Use Initials": "",
"user": "", "user": "",
"User Permissions": "", "User Permissions": "",
"Users": "", "Users": "",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "Se borró {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "Se borró {{deleteModelTag}}",
"Deleted {tagName}": "Se borró {tagName}", "Deleted {tagName}": "Se borró {tagName}",
"Description": "Descripción", "Description": "Descripción",
"Desktop Notifications": "Notificaciones", "Notifications": "Notificaciones",
"Disabled": "Desactivado", "Disabled": "Desactivado",
"Discover a modelfile": "Descubre un modelfile", "Discover a modelfile": "Descubre un modelfile",
"Discover a prompt": "Descubre un Prompt", "Discover a prompt": "Descubre un Prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} پاک شد", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} پاک شد",
"Deleted {tagName}": "{tagName} حذف شد", "Deleted {tagName}": "{tagName} حذف شد",
"Description": "توضیحات", "Description": "توضیحات",
"Desktop Notifications": "اعلان", "Notifications": "اعلان",
"Disabled": "غیرفعال", "Disabled": "غیرفعال",
"Discover a modelfile": "فایل مدل را کشف کنید", "Discover a modelfile": "فایل مدل را کشف کنید",
"Discover a prompt": "یک اعلان را کشف کنید", "Discover a prompt": "یک اعلان را کشف کنید",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} supprimé", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} supprimé",
"Deleted {tagName}": "{tagName} supprimé", "Deleted {tagName}": "{tagName} supprimé",
"Description": "Description", "Description": "Description",
"Desktop Notifications": "Notifications de bureau", "Notifications": "Notifications de bureau",
"Disabled": "Désactivé", "Disabled": "Désactivé",
"Discover a modelfile": "Découvrir un fichier de modèle", "Discover a modelfile": "Découvrir un fichier de modèle",
"Discover a prompt": "Découvrir un prompt", "Discover a prompt": "Découvrir un prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} supprimé", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} supprimé",
"Deleted {tagName}": "{tagName} supprimé", "Deleted {tagName}": "{tagName} supprimé",
"Description": "Description", "Description": "Description",
"Desktop Notifications": "Notifications de bureau", "Notifications": "Notifications de bureau",
"Disabled": "Désactivé", "Disabled": "Désactivé",
"Discover a modelfile": "Découvrir un fichier de modèle", "Discover a modelfile": "Découvrir un fichier de modèle",
"Discover a prompt": "Découvrir un prompt", "Discover a prompt": "Découvrir un prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "Eliminato {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "Eliminato {{deleteModelTag}}",
"Deleted {tagName}": "Eliminato {tagName}", "Deleted {tagName}": "Eliminato {tagName}",
"Description": "Descrizione", "Description": "Descrizione",
"Desktop Notifications": "Notifiche desktop", "Notifications": "Notifiche desktop",
"Disabled": "Disabilitato", "Disabled": "Disabilitato",
"Discover a modelfile": "Scopri un file modello", "Discover a modelfile": "Scopri un file modello",
"Discover a prompt": "Scopri un prompt", "Discover a prompt": "Scopri un prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} を削除しました", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} を削除しました",
"Deleted {tagName}": "{tagName} を削除しました", "Deleted {tagName}": "{tagName} を削除しました",
"Description": "説明", "Description": "説明",
"Desktop Notifications": "デスクトップ通知", "Notifications": "デスクトップ通知",
"Disabled": "無効", "Disabled": "無効",
"Discover a modelfile": "モデルファイルを見つける", "Discover a modelfile": "モデルファイルを見つける",
"Discover a prompt": "プロンプトを見つける", "Discover a prompt": "プロンプトを見つける",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} 삭제됨", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} 삭제됨",
"Deleted {tagName}": "{tagName} 삭제됨", "Deleted {tagName}": "{tagName} 삭제됨",
"Description": "설명", "Description": "설명",
"Desktop Notifications": "알림", "Notifications": "알림",
"Disabled": "비활성화", "Disabled": "비활성화",
"Discover a modelfile": "모델파일 검색", "Discover a modelfile": "모델파일 검색",
"Discover a prompt": "프롬프트 검색", "Discover a prompt": "프롬프트 검색",

View file

@ -15,6 +15,10 @@
"code": "de-DE", "code": "de-DE",
"title": "Deutsch" "title": "Deutsch"
}, },
{
"code": "en-GB",
"title": "English (GB)"
},
{ {
"code": "es-ES", "code": "es-ES",
"title": "Spanish" "title": "Spanish"

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} is verwijderd", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} is verwijderd",
"Deleted {tagName}": "{tagName} is verwijderd", "Deleted {tagName}": "{tagName} is verwijderd",
"Description": "Beschrijving", "Description": "Beschrijving",
"Desktop Notifications": "Desktop Notificaties", "Notifications": "Desktop Notificaties",
"Disabled": "Uitgeschakeld", "Disabled": "Uitgeschakeld",
"Discover a modelfile": "Ontdek een modelfile", "Discover a modelfile": "Ontdek een modelfile",
"Discover a prompt": "Ontdek een prompt", "Discover a prompt": "Ontdek een prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} excluído", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} excluído",
"Deleted {tagName}": "{tagName} excluído", "Deleted {tagName}": "{tagName} excluído",
"Description": "Descrição", "Description": "Descrição",
"Desktop Notifications": "Notificações da Área de Trabalho", "Notifications": "Notificações da Área de Trabalho",
"Disabled": "Desativado", "Disabled": "Desativado",
"Discover a modelfile": "Descobrir um arquivo de modelo", "Discover a modelfile": "Descobrir um arquivo de modelo",
"Discover a prompt": "Descobrir um prompt", "Discover a prompt": "Descobrir um prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} excluído", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} excluído",
"Deleted {tagName}": "{tagName} excluído", "Deleted {tagName}": "{tagName} excluído",
"Description": "Descrição", "Description": "Descrição",
"Desktop Notifications": "Notificações da Área de Trabalho", "Notifications": "Notificações da Área de Trabalho",
"Disabled": "Desativado", "Disabled": "Desativado",
"Discover a modelfile": "Descobrir um arquivo de modelo", "Discover a modelfile": "Descobrir um arquivo de modelo",
"Discover a prompt": "Descobrir um prompt", "Discover a prompt": "Descobrir um prompt",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "Удалено {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "Удалено {{deleteModelTag}}",
"Deleted {tagName}": "Удалено {tagName}", "Deleted {tagName}": "Удалено {tagName}",
"Description": "Описание", "Description": "Описание",
"Desktop Notifications": "Уведомления на рабочем столе", "Notifications": "Уведомления на рабочем столе",
"Disabled": "Отключено", "Disabled": "Отключено",
"Discover a modelfile": "Найти файл модели", "Discover a modelfile": "Найти файл модели",
"Discover a prompt": "Найти промт", "Discover a prompt": "Найти промт",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "{{deleteModelTag}} silindi", "Deleted {{deleteModelTag}}": "{{deleteModelTag}} silindi",
"Deleted {tagName}": "{tagName} silindi", "Deleted {tagName}": "{tagName} silindi",
"Description": "Açıklama", "Description": "Açıklama",
"Desktop Notifications": "Masaüstü Bildirimleri", "Notifications": "Bildirimler",
"Disabled": "Devre Dışı", "Disabled": "Devre Dışı",
"Discover a modelfile": "Bir model dosyası keşfedin", "Discover a modelfile": "Bir model dosyası keşfedin",
"Discover a prompt": "Bir prompt keşfedin", "Discover a prompt": "Bir prompt keşfedin",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "Видалено {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "Видалено {{deleteModelTag}}",
"Deleted {tagName}": "Видалено {tagName}", "Deleted {tagName}": "Видалено {tagName}",
"Description": "Опис", "Description": "Опис",
"Desktop Notifications": "Сповіщення", "Notifications": "Сповіщення",
"Disabled": "Вимкнено", "Disabled": "Вимкнено",
"Discover a modelfile": "Знайти файл моделі", "Discover a modelfile": "Знайти файл моделі",
"Discover a prompt": "Знайти промт", "Discover a prompt": "Знайти промт",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "Đã xóa {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "Đã xóa {{deleteModelTag}}",
"Deleted {tagName}": "Đã xóa {tagName}", "Deleted {tagName}": "Đã xóa {tagName}",
"Description": "Mô tả", "Description": "Mô tả",
"Desktop Notifications": "Thông báo trên máy tính (Notification)", "Notifications": "Thông báo trên máy tính (Notification)",
"Disabled": "Đã vô hiệu hóa", "Disabled": "Đã vô hiệu hóa",
"Discover a modelfile": "Khám phá thêm các mô hình mới", "Discover a modelfile": "Khám phá thêm các mô hình mới",
"Discover a prompt": "Khám phá thêm prompt mới", "Discover a prompt": "Khám phá thêm prompt mới",

View file

@ -100,7 +100,7 @@
"Deleted {{deleteModelTag}}": "已删除{{deleteModelTag}}", "Deleted {{deleteModelTag}}": "已删除{{deleteModelTag}}",
"Deleted {tagName}": "已删除{tagName}", "Deleted {tagName}": "已删除{tagName}",
"Description": "描述", "Description": "描述",
"Desktop Notifications": "桌面通知", "Notifications": "桌面通知",
"Disabled": "禁用", "Disabled": "禁用",
"Discover a modelfile": "探索模型文件", "Discover a modelfile": "探索模型文件",
"Discover a prompt": "探索提示词", "Discover a prompt": "探索提示词",

View file

@ -101,7 +101,7 @@
"Deleted {{deleteModelTag}}": "已刪除 {{deleteModelTag}}", "Deleted {{deleteModelTag}}": "已刪除 {{deleteModelTag}}",
"Deleted {tagName}": "已刪除 {tagName}", "Deleted {tagName}": "已刪除 {tagName}",
"Description": "描述", "Description": "描述",
"Desktop Notifications": "桌面通知", "Notifications": "桌面通知",
"Disabled": "已停用", "Disabled": "已停用",
"Discover a modelfile": "發現新 Modelfile", "Discover a modelfile": "發現新 Modelfile",
"Discover a prompt": "發現新提示詞", "Discover a prompt": "發現新提示詞",

View file

@ -111,6 +111,82 @@ export const getGravatarURL = (email) => {
return `https://www.gravatar.com/avatar/${hash}`; return `https://www.gravatar.com/avatar/${hash}`;
}; };
export const canvasPixelTest = () => {
// Test a 1x1 pixel to potentially identify browser/plugin fingerprint blocking or spoofing
// Inspiration: https://github.com/kkapsner/CanvasBlocker/blob/master/test/detectionTest.js
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
canvas.height = 1;
canvas.width = 1;
const imageData = new ImageData(canvas.width, canvas.height);
const pixelValues = imageData.data;
// Generate RGB test data
for (let i = 0; i < imageData.data.length; i += 1) {
if (i % 4 !== 3) {
pixelValues[i] = Math.floor(256 * Math.random());
} else {
pixelValues[i] = 255;
}
}
ctx.putImageData(imageData, 0, 0);
const p = ctx.getImageData(0, 0, canvas.width, canvas.height).data;
// Read RGB data and fail if unmatched
for (let i = 0; i < p.length; i += 1) {
if (p[i] !== pixelValues[i]) {
console.log(
'canvasPixelTest: Wrong canvas pixel RGB value detected:',
p[i],
'at:',
i,
'expected:',
pixelValues[i]
);
console.log('canvasPixelTest: Canvas blocking or spoofing is likely');
return false;
}
}
return true;
};
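In short, canvasPixelTest writes one random RGB pixel and checks that reading it back returns the same bytes; anti-fingerprinting extensions typically perturb that read-back, so a mismatch is treated as canvas blocking or spoofing. Usage mirrors the calls in Account.svelte and generateInitialsImage:

if (canvasPixelTest()) {
	// read-back matched: safe to render an initials avatar on a canvas
} else {
	// pixels were altered: fall back to the default '/user.png' image
}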
export const generateInitialsImage = (name) => {
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
canvas.width = 100;
canvas.height = 100;
if (!canvasPixelTest()) {
console.log(
'generateInitialsImage: failed pixel test, fingerprint evasion is likely. Using default image.'
);
return '/user.png';
}
ctx.fillStyle = '#F39C12';
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.fillStyle = '#FFFFFF';
ctx.font = '40px Helvetica';
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
const sanitizedName = name.trim();
const initials =
sanitizedName.length > 0
? sanitizedName[0] +
(sanitizedName.split(' ').length > 1
? sanitizedName[sanitizedName.lastIndexOf(' ') + 1]
: '')
: '';
ctx.fillText(initials.toUpperCase(), canvas.width / 2, canvas.height / 2);
return canvas.toDataURL();
};
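The initials logic takes the first character plus the character immediately after the last space, then uppercases the pair. A quick standalone check of that expression (illustrative only):

const initialsOf = (name: string): string => {
	const n = name.trim();
	return n.length > 0
		? (n[0] + (n.split(' ').length > 1 ? n[n.lastIndexOf(' ') + 1] : '')).toUpperCase()
		: '';
};

initialsOf('Timothy Baek'); // 'TB'
initialsOf('timothy');      // 'T'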
export const copyToClipboard = (text) => { export const copyToClipboard = (text) => {
if (!navigator.clipboard) { if (!navigator.clipboard) {
const textArea = document.createElement('textarea'); const textArea = document.createElement('textarea');

View file

@ -4,6 +4,8 @@
import { goto } from '$app/navigation'; import { goto } from '$app/navigation';
import { onMount, getContext } from 'svelte'; import { onMount, getContext } from 'svelte';
import dayjs from 'dayjs';
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import { updateUserRole, getUsers, deleteUserById } from '$lib/apis/users'; import { updateUserRole, getUsers, deleteUserById } from '$lib/apis/users';
@ -16,6 +18,7 @@
let loaded = false; let loaded = false;
let users = []; let users = [];
let search = '';
let selectedUser = null; let selectedUser = null;
let showSettingsModal = false; let showSettingsModal = false;
@ -80,20 +83,15 @@
<SettingsModal bind:show={showSettingsModal} /> <SettingsModal bind:show={showSettingsModal} />
<div class="min-h-screen max-h-[100dvh] w-full flex justify-center dark:text-white font-mona"> <div class="min-h-screen max-h-[100dvh] w-full flex justify-center dark:text-white">
{#if loaded} {#if loaded}
<div class=" flex flex-col justify-between w-full overflow-y-auto"> <div class=" flex flex-col justify-between w-full overflow-y-auto">
<div class="max-w-2xl mx-auto w-full px-3 md:px-0 my-10"> <div class=" mx-auto w-full">
<div class="w-full"> <div class="w-full">
<div class=" flex flex-col justify-center"> <div class=" flex flex-col justify-center">
<div class=" px-5 pt-3">
<div class=" flex justify-between items-center"> <div class=" flex justify-between items-center">
<div class="flex items-center text-2xl font-semibold"> <div class="flex items-center text-2xl font-semibold">Dashboard</div>
{$i18n.t('All Users')}
<div class="flex self-center w-[1px] h-6 mx-2.5 bg-gray-200 dark:bg-gray-700" />
<span class="text-lg font-medium text-gray-500 dark:text-gray-300"
>{users.length}</span
>
</div>
<div> <div>
<button <button
class="flex items-center space-x-1 px-3 py-1.5 rounded-xl bg-gray-50 hover:bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 transition" class="flex items-center space-x-1 px-3 py-1.5 rounded-xl bg-gray-50 hover:bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 transition"
@ -119,32 +117,64 @@
</button> </button>
</div> </div>
</div> </div>
<div class=" text-gray-500 text-xs mt-1">
{$i18n.t("Click on the user role button to change a user's role.")}
</div> </div>
<hr class=" my-3 dark:border-gray-600" /> <div class="px-5 flex text-sm gap-2.5">
<div class="py-3 border-b font-medium text-gray-100 cursor-pointer">Overview</div>
<!-- <div class="py-3 text-gray-300 cursor-pointer">Users</div> -->
</div>
<hr class=" mb-3 dark:border-gray-800" />
<div class="px-5">
<div class="mt-0.5 mb-3 flex justify-between">
<div class="flex text-lg font-medium px-0.5">
{$i18n.t('All Users')}
<div class="flex self-center w-[1px] h-6 mx-2.5 bg-gray-200 dark:bg-gray-700" />
<span class="text-lg font-medium text-gray-500 dark:text-gray-300"
>{users.length}</span
>
</div>
<div class="">
<input
class=" w-60 rounded-lg py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder={$i18n.t('Search')}
bind:value={search}
/>
</div>
</div>
<div class="scrollbar-hidden relative overflow-x-auto whitespace-nowrap"> <div class="scrollbar-hidden relative overflow-x-auto whitespace-nowrap">
<table class="w-full text-sm text-left text-gray-500 dark:text-gray-400 table-auto"> <table class="w-full text-sm text-left text-gray-500 dark:text-gray-400 table-auto">
<thead <thead
class="text-xs text-gray-700 uppercase bg-gray-50 dark:bg-gray-700 dark:text-gray-400" class="text-xs text-gray-700 uppercase bg-gray-50 dark:bg-gray-850 dark:text-gray-400"
> >
<tr> <tr>
<th scope="col" class="px-3 py-2"> {$i18n.t('Role')} </th> <th scope="col" class="px-3 py-2"> {$i18n.t('Role')} </th>
<th scope="col" class="px-3 py-2"> {$i18n.t('Name')} </th> <th scope="col" class="px-3 py-2"> {$i18n.t('Name')} </th>
<th scope="col" class="px-3 py-2"> {$i18n.t('Email')} </th> <th scope="col" class="px-3 py-2"> {$i18n.t('Email')} </th>
<th scope="col" class="px-3 py-2"> {$i18n.t('Action')} </th> <th scope="col" class="px-3 py-2"> {$i18n.t('Created at')} </th>
<th scope="col" class="px-3 py-2 text-right" />
</tr> </tr>
</thead> </thead>
<tbody> <tbody>
{#each users as user} {#each users.filter((user) => {
if (search === '') {
return true;
} else {
let name = user.name.toLowerCase();
const query = search.toLowerCase();
return name.includes(query);
}
}) as user}
<tr class="bg-white border-b dark:bg-gray-900 dark:border-gray-700 text-xs"> <tr class="bg-white border-b dark:bg-gray-900 dark:border-gray-700 text-xs">
<td class="px-3 py-2 min-w-[7rem] w-28"> <td class="px-3 py-2 min-w-[7rem] w-28">
<button <button
class=" flex items-center gap-2 text-xs px-3 py-0.5 rounded-lg {user.role === class=" flex items-center gap-2 text-xs px-3 py-0.5 rounded-lg {user.role ===
'admin' && 'admin' &&
'text-sky-600 dark:text-sky-200 bg-sky-200/30'} {user.role === 'user' && 'text-sky-600 dark:text-sky-200 bg-sky-200/30'} {user.role ===
'user' &&
'text-green-600 dark:text-green-200 bg-green-200/30'} {user.role === 'text-green-600 dark:text-green-200 bg-green-200/30'} {user.role ===
'pending' && 'text-gray-600 dark:text-gray-200 bg-gray-200/30'}" 'pending' && 'text-gray-600 dark:text-gray-200 bg-gray-200/30'}"
on:click={() => { on:click={() => {
@ -180,7 +210,11 @@
<td class=" px-3 py-2"> {user.email} </td> <td class=" px-3 py-2"> {user.email} </td>
<td class=" px-3 py-2"> <td class=" px-3 py-2">
<div class="flex justify-start w-full"> {dayjs(user.timestamp * 1000).format($i18n.t('MMMM DD, YYYY'))}
</td>
<td class="px-3 py-2 text-right">
<div class="flex justify-end w-full">
<button <button
class="self-center w-fit text-sm px-2 py-2 hover:bg-black/5 dark:hover:bg-white/5 rounded-xl" class="self-center w-fit text-sm px-2 py-2 hover:bg-black/5 dark:hover:bg-white/5 rounded-xl"
on:click={async () => { on:click={async () => {
@ -232,6 +266,11 @@
</tbody> </tbody>
</table> </table>
</div> </div>
<div class=" text-gray-500 text-xs mt-2 text-right">
{$i18n.t("Click on the user role button to change a user's role.")}
</div>
</div>
</div> </div>
</div> </div>
</div> </div>
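The admin list now filters rows client-side by name (case-insensitive) via the new search input, and shows a Created at column formatted with dayjs from the user's Unix timestamp. The same filter could also match e-mail addresses; a small sketch of that variation (an extension, not what the diff does):

const visibleUsers = users.filter((user) => {
	if (search === '') return true;
	const query = search.toLowerCase();
	return (
		user.name.toLowerCase().includes(query) ||
		user.email.toLowerCase().includes(query)
	);
});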

View file

@ -6,6 +6,7 @@
import { WEBUI_NAME, config, user } from '$lib/stores'; import { WEBUI_NAME, config, user } from '$lib/stores';
import { onMount, getContext } from 'svelte'; import { onMount, getContext } from 'svelte';
import { toast } from 'svelte-sonner'; import { toast } from 'svelte-sonner';
import { generateInitialsImage, canvasPixelTest } from '$lib/utils';
const i18n = getContext('i18n'); const i18n = getContext('i18n');
@ -36,10 +37,12 @@
}; };
const signUpHandler = async () => { const signUpHandler = async () => {
const sessionUser = await userSignUp(name, email, password).catch((error) => { const sessionUser = await userSignUp(name, email, password, generateInitialsImage(name)).catch(
(error) => {
toast.error(error); toast.error(error);
return null; return null;
}); }
);
await setSessionUser(sessionUser); await setSessionUser(sessionUser);
}; };