From 099b1d066bd593b3119fb7b0e850627b95548711 Mon Sep 17 00:00:00 2001
From: Jannik S <69747628+jannikstdl@users.noreply.github.com>
Date: Tue, 2 Apr 2024 11:28:04 +0200
Subject: [PATCH] Revert "Merge Updates & Dockerfile improvements" (#3)

This reverts commit 9763d885be9fca79481df065524107c86b69c915.
---
 .github/workflows/format-backend.yaml         |   34 +-
 .github/workflows/format-build-frontend.yaml  |   42 +-
 .gitignore                                    |    2 +-
 CHANGELOG.md                                  |   80 -
 Dockerfile                                    |   17 +-
 Makefile                                      |    2 -
 README.md                                     |    4 +-
 backend/apps/audio/main.py                    |   24 +-
 backend/apps/images/main.py                   |  135 +-
 backend/apps/images/utils/comfyui.py          |  234 -
 backend/apps/litellm/main.py                  |   75 +-
 backend/apps/ollama/main.py                   |  370 +-
 backend/apps/openai/main.py                   |   67 +-
 backend/apps/rag/main.py                      |  231 +-
 backend/apps/rag/utils.py                     |   19 +-
 backend/apps/web/internal/db.py               |    7 +-
 backend/apps/web/main.py                      |    2 -
 backend/apps/web/models/auths.py              |   10 +-
 backend/apps/web/models/chats.py              |   14 +
 backend/apps/web/models/documents.py          |   10 +-
 backend/apps/web/models/modelfiles.py         |   24 +-
 backend/apps/web/models/prompts.py            |   15 +-
 backend/apps/web/models/tags.py               |   14 +-
 backend/apps/web/routers/auths.py             |   14 +-
 backend/apps/web/routers/chats.py             |   10 +-
 backend/apps/web/routers/configs.py           |    8 +-
 backend/apps/web/routers/modelfiles.py        |   45 +-
 backend/apps/web/routers/users.py             |    8 +-
 backend/apps/web/routers/utils.py             |  149 +
 backend/config.py                             |   76 +-
 backend/constants.py                          |   12 +-
 backend/data/config.json                      |   57 +-
 backend/main.py                               |   55 +-
 backend/requirements.txt                      |    1 -
 backend/start.sh                              |    5 -
 backend/utils/webhook.py                      |   54 -
 demo.gif                                      |  Bin 5192022 -> 6353949 bytes
 docs/CONTRIBUTING.md                          |   12 -
 i18next-parser.config.ts                      |   38 -
 .../helm/templates/ollama-statefulset.yaml    |    2 +-
 kubernetes/helm/templates/webui-pvc.yaml      |    2 +-
 kubernetes/helm/templates/webui-service.yaml  |   17 +-
 kubernetes/helm/values.yaml                   |    2 -
 .../manifest/base/webui-deployment.yaml       |    2 +-
 kubernetes/manifest/base/webui-pvc.yaml       |    4 +-
 package-lock.json                             | 6053 ++++++++---------
 package.json                                  |   12 +-
 src/app.css                                   |    4 -
 src/app.html                                  |   43 +-
 src/lib/apis/images/index.ts                  |   10 +-
 src/lib/apis/index.ts                         |   57 -
 src/lib/apis/litellm/index.ts                 |    2 +-
 src/lib/apis/ollama/index.ts                  |   69 +-
 src/lib/apis/openai/index.ts                  |   50 -
 src/lib/components/AddFilesPlaceholder.svelte |    9 +-
 src/lib/components/ChangelogModal.svelte      |   11 +-
 src/lib/components/admin/EditUserModal.svelte |   16 +-
 .../components/admin/Settings/Database.svelte |   10 +-
 .../components/admin/Settings/General.svelte  |   58 +-
 .../components/admin/Settings/Users.svelte    |   26 +-
 src/lib/components/admin/SettingsModal.svelte |   11 +-
 src/lib/components/chat/MessageInput.svelte   |   38 +-
 .../chat/MessageInput/Documents.svelte        |   12 +-
 .../chat/MessageInput/Models.svelte           |   10 +-
 .../chat/MessageInput/PromptCommands.svelte   |   11 +-
 src/lib/components/chat/Messages.svelte       |    6 +-
 .../chat/Messages/Placeholder.svelte          |   12 +-
 .../chat/Messages/ResponseMessage.svelte      |   12 +-
 .../chat/Messages/UserMessage.svelte          |   21 +-
 src/lib/components/chat/ModelSelector.svelte  |  170 +-
 .../chat/ModelSelector/Selector.svelte        |  389 --
 src/lib/components/chat/Settings/About.svelte |   22 +-
 .../components/chat/Settings/Account.svelte   |   16 +-
 .../Settings/Account/UpdatePassword.svelte    |   17 +-
 .../components/chat/Settings/Advanced.svelte  |   24 +-
 .../Settings/Advanced/AdvancedParams.svelte   |   76 +-
 src/lib/components/chat/Settings/Audio.svelte |   58 +-
 src/lib/components/chat/Settings/Chats.svelte |   57 +-
 .../chat/Settings/Connections.svelte          |   29 +-
 .../components/chat/Settings/General.svelte   |  178 +-
 .../components/chat/Settings/Images.svelte    |  161 +-
 .../components/chat/Settings/Interface.svelte |  109 +-
 .../components/chat/Settings/Models.svelte    |  311 +-
 src/lib/components/chat/SettingsModal.svelte  |   37 +-
 src/lib/components/chat/ShareChatModal.svelte |    9 +-
 src/lib/components/chat/ShortcutsModal.svelte |   21 +-
 src/lib/components/chat/TagChatModal.svelte   |   20 -
 src/lib/components/common/Dropdown.svelte     |   40 -
 src/lib/components/common/ImagePreview.svelte |   21 +-
 src/lib/components/common/Modal.svelte        |    6 +-
 src/lib/components/common/Selector.svelte     |   95 -
 src/lib/components/common/Tags.svelte         |    3 +-
 .../components/common/Tags/TagInput.svelte    |   55 +-
 src/lib/components/common/Tags/TagList.svelte |    2 +-
 src/lib/components/common/Tooltip.svelte      |    2 +-
 .../components/documents/AddDocModal.svelte   |   16 +-
 .../components/documents/EditDocModal.svelte  |   14 +-
 .../documents/Settings/General.svelte         |  244 +-
 .../components/documents/SettingsModal.svelte |    7 +-
 src/lib/components/icons/Check.svelte         |   15 -
 src/lib/components/icons/ChevronDown.svelte   |   15 -
 src/lib/components/icons/ChevronUpDown.svelte |   19 -
 .../components/icons/EllipsisVertical.svelte  |   19 -
 src/lib/components/icons/GarbageBin.svelte    |   19 -
 src/lib/components/icons/Pencil.svelte        |   19 -
 src/lib/components/icons/Search.svelte        |   19 -
 src/lib/components/layout/Navbar.svelte       |  204 +-
 src/lib/components/layout/Navbar/Menu.svelte  |  123 -
 src/lib/components/layout/Sidebar.svelte      |  403 +-
 .../components/layout/Sidebar/ChatMenu.svelte |   56 -
 .../playground/ChatCompletion.svelte          |   14 +-
 src/lib/constants.ts                          |    3 -
 src/lib/i18n/index.ts                         |   79 -
 src/lib/i18n/locales/bg-BG/translation.json   |  363 -
 src/lib/i18n/locales/ca-ES/translation.json   |  363 -
 src/lib/i18n/locales/de-DE/translation.json   |  363 -
 src/lib/i18n/locales/en-US/translation.json   |  363 -
 src/lib/i18n/locales/es-ES/translation.json   |  363 -
 src/lib/i18n/locales/fa-IR/translation.json   |  363 -
 src/lib/i18n/locales/fr-CA/translation.json   |  363 -
 src/lib/i18n/locales/fr-FR/translation.json   |  363 -
 src/lib/i18n/locales/it-IT/translation.json   |  363 -
 src/lib/i18n/locales/ja-JP/translation.json   |  363 -
 src/lib/i18n/locales/ko-KR/translation.json   |  362 -
 src/lib/i18n/locales/languages.json           |   74 -
 src/lib/i18n/locales/nl-NL/translation.json   |  363 -
 src/lib/i18n/locales/pt-PT/translation.json   |  363 -
 src/lib/i18n/locales/ru-RU/translation.json   |  363 -
 src/lib/i18n/locales/uk-UA/translation.json   |  363 -
 src/lib/i18n/locales/vi-VN/translation.json   |  363 -
 src/lib/i18n/locales/zh-CN/translation.json   |  363 -
 src/lib/i18n/locales/zh-TW/translation.json   |  364 -
 src/lib/stores/index.ts                       |    3 +-
 src/lib/utils/index.ts                        |   30 -
 src/lib/utils/transitions/index.ts            |   48 -
 src/routes/(app)/+layout.svelte               |   26 +-
 src/routes/(app)/+page.svelte                 |  162 +-
 src/routes/(app)/admin/+page.svelte           |   26 +-
 src/routes/(app)/c/[id]/+page.svelte          |  155 +-
 src/routes/(app)/documents/+page.svelte       |   33 +-
 src/routes/(app)/modelfiles/+page.svelte      |   28 +-
 .../(app)/modelfiles/create/+page.svelte      |   66 +-
 src/routes/(app)/modelfiles/edit/+page.svelte |   34 +-
 src/routes/(app)/playground/+page.svelte      |   69 +-
 src/routes/(app)/prompts/+page.svelte         |   22 +-
 src/routes/(app)/prompts/create/+page.svelte  |   56 +-
 src/routes/(app)/prompts/edit/+page.svelte    |   54 +-
 src/routes/+layout.svelte                     |   10 +-
 src/routes/auth/+page.svelte                  |   36 +-
 src/routes/error/+page.svelte                 |   21 +-
 src/tailwind.css                              |    6 +-
 static/manifest.json                          |    2 +-
 svelte.config.js                              |    6 -
 tailwind.config.js                            |    5 +-
 test.json                                     |    6 +
 155 files changed, 4795 insertions(+), 14501 deletions(-)
 delete mode 100644 backend/apps/images/utils/comfyui.py
 delete mode 100644 backend/utils/webhook.py
 delete mode 100644 i18next-parser.config.ts
 delete mode 100644 src/lib/components/chat/ModelSelector/Selector.svelte
 delete mode 100644 src/lib/components/chat/TagChatModal.svelte
 delete mode 100644 src/lib/components/common/Dropdown.svelte
 delete mode 100644 src/lib/components/common/Selector.svelte
 delete mode 100644 src/lib/components/icons/Check.svelte
 delete mode 100644 src/lib/components/icons/ChevronDown.svelte
 delete mode 100644 src/lib/components/icons/ChevronUpDown.svelte
 delete mode 100644 src/lib/components/icons/EllipsisVertical.svelte
 delete mode 100644 src/lib/components/icons/GarbageBin.svelte
 delete mode 100644 src/lib/components/icons/Pencil.svelte
 delete mode 100644 src/lib/components/icons/Search.svelte
 delete mode 100644 src/lib/components/layout/Navbar/Menu.svelte
 delete mode 100644 src/lib/components/layout/Sidebar/ChatMenu.svelte
 delete mode 100644 src/lib/i18n/index.ts
 delete mode 100644 src/lib/i18n/locales/bg-BG/translation.json
 delete mode 100644 src/lib/i18n/locales/ca-ES/translation.json
 delete mode 100644 src/lib/i18n/locales/de-DE/translation.json
 delete mode 100644 src/lib/i18n/locales/en-US/translation.json
 delete mode 100644 src/lib/i18n/locales/es-ES/translation.json
 delete mode 100644 src/lib/i18n/locales/fa-IR/translation.json
 delete mode 100644 src/lib/i18n/locales/fr-CA/translation.json
 delete mode 100644 src/lib/i18n/locales/fr-FR/translation.json
 delete mode 100644 src/lib/i18n/locales/it-IT/translation.json
 delete mode 100644 src/lib/i18n/locales/ja-JP/translation.json
 delete mode 100644 src/lib/i18n/locales/ko-KR/translation.json
 delete mode 100644 src/lib/i18n/locales/languages.json
 delete mode 100644 src/lib/i18n/locales/nl-NL/translation.json
 delete mode 100644 src/lib/i18n/locales/pt-PT/translation.json
 delete mode 100644 src/lib/i18n/locales/ru-RU/translation.json
 delete mode 100644 src/lib/i18n/locales/uk-UA/translation.json
 delete mode 100644 src/lib/i18n/locales/vi-VN/translation.json
 delete mode 100644 src/lib/i18n/locales/zh-CN/translation.json
 delete mode 100644 src/lib/i18n/locales/zh-TW/translation.json
 delete mode 100644 src/lib/utils/transitions/index.ts
 create mode 100644 test.json

diff --git a/.github/workflows/format-backend.yaml b/.github/workflows/format-backend.yaml
index dd0e9486..9d767fa5 100644
--- a/.github/workflows/format-backend.yaml
+++ b/.github/workflows/format-backend.yaml
@@ -1,39 +1,27 @@
 name: Python CI
-
 on:
   push:
-    branches:
-      - main
-      - dev
+    branches: ['main']
   pull_request:
-    branches:
-      - main
-      - dev
-
 jobs:
   build:
     name: 'Format Backend'
+    env:
+      PUBLIC_API_BASE_URL: ''
     runs-on: ubuntu-latest
-
     strategy:
       matrix:
-        python-version: [3.11]
-
+        node-version:
+          - latest
     steps:
       - uses: actions/checkout@v4
-
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-
+      - name: Use Python
+        uses: actions/setup-python@v4
+      - name: Use Bun
+        uses: oven-sh/setup-bun@v1
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
-         pip install black
-
+         pip install yapf
      - name: Format backend
-       run: npm run format:backend
-
-     - name: Check for changes after format
-       run: git diff --exit-code
+       run: bun run format:backend

diff --git a/.github/workflows/format-build-frontend.yaml b/.github/workflows/format-build-frontend.yaml
index 121266bf..65916cf4 100644
--- a/.github/workflows/format-build-frontend.yaml
+++ b/.github/workflows/format-build-frontend.yaml
@@ -1,36 +1,22 @@
-name: Frontend Build
-
+name: Bun CI
 on:
   push:
-    branches:
-      - main
-      - dev
+    branches: ['main']
   pull_request:
-    branches:
-      - main
-      - dev
-
 jobs:
   build:
     name: 'Format & Build Frontend'
+    env:
+      PUBLIC_API_BASE_URL: ''
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout Repository
-        uses: actions/checkout@v4
-
-      - name: Setup Node.js
-        uses: actions/setup-node@v3
-        with:
-          node-version: '20' # Or specify any other version you want to use
-
-      - name: Install Dependencies
-        run: npm install
-
-      - name: Format Frontend
-        run: npm run format
-
-      - name: Check for Changes After Format
-        run: git diff --exit-code
-
-      - name: Build Frontend
-        run: npm run build
+      - uses: actions/checkout@v4
+      - name: Use Bun
+        uses: oven-sh/setup-bun@v1
+      - run: bun --version
+      - name: Install frontend dependencies
+        run: bun install
+      - name: Format frontend
+        run: bun run format
+      - name: Build frontend
+        run: bun run build

diff --git a/.gitignore b/.gitignore
index 2ccac4d5..528e1f83 100644
--- a/.gitignore
+++ b/.gitignore
@@ -166,7 +166,7 @@ cython_debug/
 #  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 #  and can be added to the global gitignore or merged into this file.  For a more nuclear
 #  option (not recommended) you can uncomment the following to ignore the entire idea folder.
-.idea/
+#.idea/

 # Logs
 logs

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e48f8dc7..d57ba400 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,86 +5,6 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

-## [0.1.116] - 2024-03-31
-
-### Added
-
-- **🔄 Enhanced UI**: Model selector now conveniently located in the navbar, enabling seamless switching between multiple models during conversations.
-- **🔍 Improved Model Selector**: Directly pull a model from the selector/Models now display detailed information for better understanding.
-- **💬 Webhook Support**: Now compatible with Google Chat and Microsoft Teams.
-- **🌐 Localization**: Korean translation (I18n) now available.
-- **🌑 Dark Theme**: OLED dark theme introduced for reduced strain during prolonged usage.
-- **🏷️ Tag Autocomplete**: Dropdown feature added for effortless chat tagging.
-
-### Fixed
-
-- **🔽 Auto-Scrolling**: Addressed OpenAI auto-scrolling issue.
-- **🏷️ Tag Validation**: Implemented tag validation to prevent empty string tags.
-- **🚫 Model Whitelisting**: Resolved LiteLLM model whitelisting issue.
-- **✅ Spelling**: Corrected various spelling issues for improved readability.
-
-## [0.1.115] - 2024-03-24
-
-### Added
-
-- **🔍 Custom Model Selector**: Easily find and select custom models with the new search filter feature.
-- **🛑 Cancel Model Download**: Added the ability to cancel model downloads.
-- **🎨 Image Generation ComfyUI**: Image generation now supports ComfyUI.
-- **🌟 Updated Light Theme**: Updated the light theme for a fresh look.
-- **🌍 Additional Language Support**: Now supporting Bulgarian, Italian, Portuguese, Japanese, and Dutch.
-
-### Fixed
-
-- **🔧 Fixed Broken Experimental GGUF Upload**: Resolved issues with experimental GGUF upload functionality.
-
-### Changed
-
-- **🔄 Vector Storage Reset Button**: Moved the reset vector storage button to document settings.
-
-## [0.1.114] - 2024-03-20
-
-### Added
-
-- **🔗 Webhook Integration**: Now you can subscribe to new user sign-up events via webhook. Simply navigate to the admin panel > admin settings > webhook URL.
-- **🛡️ Enhanced Model Filtering**: Alongside Ollama, OpenAI proxy model whitelisting, we've added model filtering functionality for LiteLLM proxy.
-- **🌍 Expanded Language Support**: Spanish, Catalan, and Vietnamese languages are now available, with improvements made to others.
-
-### Fixed
-
-- **🔧 Input Field Spelling**: Resolved issue with spelling mistakes in input fields.
-- **🖊️ Light Mode Styling**: Fixed styling issue with light mode in document adding.
-
-### Changed
-
-- **🔄 Language Sorting**: Languages are now sorted alphabetically by their code for improved organization.
-
-## [0.1.113] - 2024-03-18
-
-### Added
-
-- 🌍 **Localization**: You can now change the UI language in Settings > General. We support Ukrainian, German, Farsi (Persian), Traditional and Simplified Chinese and French translations. You can help us to translate the UI into your language! More info in our [CONTRIBUTION.md](https://github.com/open-webui/open-webui/blob/main/docs/CONTRIBUTING.md#-translations-and-internationalization).
-- 🎨 **System-wide Theme**: Introducing a new system-wide theme for enhanced visual experience.
-
-### Fixed
-
-- 🌑 **Dark Background on Select Fields**: Improved readability by adding a dark background to select fields, addressing issues on certain browsers/devices.
-- **Multiple OPENAI_API_BASE_URLS Issue**: Resolved issue where multiple base URLs caused conflicts when one wasn't functioning.
-- **RAG Encoding Issue**: Fixed encoding problem in RAG.
-- **npm Audit Fix**: Addressed npm audit findings.
-- **Reduced Scroll Threshold**: Improved auto-scroll experience by reducing the scroll threshold from 50px to 5px.
-
-### Changed
-
-- 🔄 **Sidebar UI Update**: Updated sidebar UI to feature a chat menu dropdown, replacing two icons for improved navigation.
-
-## [0.1.112] - 2024-03-15
-
-### Fixed
-
-- 🗨️ Resolved chat malfunction after image generation.
-- 🎨 Fixed various RAG issues.
-- 🧪 Rectified experimental broken GGUF upload logic.
-
 ## [0.1.111] - 2024-03-10

 ### Added

diff --git a/Dockerfile b/Dockerfile
index 25608349..4573de78 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,8 +2,6 @@
 # Initialize device type args
 # use build args in the docker build commmand with --build-arg="BUILDARG=true"
 ARG USE_CUDA=false
-ARG USE_CUDA_VER=cu121
-ARG USE_EMBEDDING_MODEL=all-MiniLM-L6-v2
 ARG USE_MPS=false
 ARG INCLUDE_OLLAMA=false

@@ -30,9 +28,8 @@ RUN npm run build

 ######## WebUI backend ########
 FROM python:3.11-slim-bookworm as base

+# Use args
 ARG USE_CUDA
-ARG USE_CUDA_VER
-ARG USE_EMBEDDING_MODEL
 ARG USE_MPS
 ARG INCLUDE_OLLAMA

@@ -42,9 +39,7 @@ ENV ENV=prod \
     # pass build args to the build
     INCLUDE_OLLAMA_DOCKER=${INCLUDE_OLLAMA} \
     USE_MPS_DOCKER=${USE_MPS} \
-    USE_CUDA_DOCKER=${USE_CUDA} \
-    USE_CUDA_DOCKER_VER=${USE_CUDA_VER} \
-    USE_EMBEDDING_MODEL_DOCKER=${USE_EMBEDDING_MODEL}
+    USE_CUDA_DOCKER=${USE_CUDA}

 ## Basis URL Config ##
 ENV OLLAMA_BASE_URL="/ollama" \
@@ -66,7 +61,7 @@ ENV WHISPER_MODEL="base" \
     # Leaderboard: https://huggingface.co/spaces/mteb/leaderboard
     # for better performance and multilangauge support use "intfloat/multilingual-e5-large" (~2.5GB) or "intfloat/multilingual-e5-base" (~1.5GB)
     # IMPORTANT: If you change the default model (all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
-ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
+ENV RAG_EMBEDDING_MODEL="all-MiniLM-L6-v2" \
     RAG_EMBEDDING_MODEL_DIR="/app/backend/data/cache/embedding/models" \
     SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models" \
     # device type for whisper tts and embbeding models - "cpu" (default) or "mps" (apple silicon) - choosing this right can lead to better performance
@@ -83,10 +78,8 @@ WORKDIR /app/backend
 COPY ./backend/requirements.txt ./requirements.txt

 RUN if [ "$USE_CUDA" = "true" ]; then \
-    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
+    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 --no-cache-dir && \
     pip3 install -r requirements.txt --no-cache-dir; \
-    python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])" && \
-    python -c "import os; from chromadb.utils import embedding_functions; sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=os.environ['RAG_EMBEDDING_MODEL'], device='cpu')"; \
     elif [ "$USE_MPS" = "true" ]; then \
     pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
     pip3 install -r requirements.txt --no-cache-dir && \
@@ -138,4 +131,4 @@ COPY ./backend .

 EXPOSE 8080

-CMD [ "bash", "start.sh"]
+CMD [ "bash", "start.sh"]
\ No newline at end of file

diff --git a/Makefile b/Makefile
index 1ec170a2..cbcc41d9 100644
--- a/Makefile
+++ b/Makefile
@@ -8,8 +8,6 @@ remove:
 start:
 	@docker-compose start

-startAndBuild:
-	docker-compose up -d --build

 stop:
 	@docker-compose stop

diff --git a/README.md b/README.md
index e2ee284e..46777dbf 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@
 [![Discord](https://img.shields.io/badge/Discord-Open_WebUI-blue?logo=discord&logoColor=white)](https://discord.gg/5rJgQTnV4s)
 [![](https://img.shields.io/static/v1?label=Sponsor&message=%E2%9D%A4&logo=GitHub&color=%23fe8e86)](https://github.com/sponsors/tjbck)

-Open WebUI is an extensible, feature-rich, and user-friendly self-hosted WebUI designed to operate entirely offline. It supports various LLM runners, including Ollama and OpenAI-compatible APIs. For more information, be sure to check out our [Open WebUI Documentation](https://docs.openwebui.com/).
+User-friendly WebUI for LLMs, supported LLM runners include Ollama and OpenAI-compatible APIs. For more information, be sure to check out our [Open WebUI Documentation](https://docs.openwebui.com/).

 ![Open WebUI Demo](./demo.gif)

@@ -79,8 +79,6 @@ Open WebUI is an extensible, feature-rich, and user-friendly self-hosted WebUI d
 - 🔒 **Backend Reverse Proxy Support**: Bolster security through direct communication between Open WebUI backend and Ollama. This key feature eliminates the need to expose Ollama over LAN. Requests made to the '/ollama/api' route from the web UI are seamlessly redirected to Ollama from the backend, enhancing overall system security.

- 🌐🌍 **Multilingual Support**: Experience Open WebUI in your preferred language with our internationalization (i18n) support. Join us in expanding our supported languages! We're actively seeking contributors!
-
 - 🌟 **Continuous Updates**: We are committed to improving Open WebUI with regular updates and new features.

 ## 🔗 Also Check Out Open WebUI Community!
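Context for the Dockerfile hunk above: the removed CUDA branch of the RUN step also dropped a build-time warm-up that pre-downloaded the Whisper and embedding models into the image cache. Unpacked from the deleted "python -c" one-liners, that warm-up is equivalent to the following sketch (model names and cache directories come from the Dockerfile's ENV block; the expansion is illustrative, not part of the patch itself):

# Build-time model warm-up removed by this revert: fetch the Whisper and
# sentence-transformer models into the image cache so the first request
# does not trigger a download at container runtime.
import os

from chromadb.utils import embedding_functions
from faster_whisper import WhisperModel

# Downloads WHISPER_MODEL (default "base") into WHISPER_MODEL_DIR.
WhisperModel(
    os.environ["WHISPER_MODEL"],
    device="cpu",
    compute_type="int8",
    download_root=os.environ["WHISPER_MODEL_DIR"],
)

# Downloads RAG_EMBEDDING_MODEL (default "all-MiniLM-L6-v2") into the
# sentence-transformers cache pointed at by SENTENCE_TRANSFORMERS_HOME.
embedding_functions.SentenceTransformerEmbeddingFunction(
    model_name=os.environ["RAG_EMBEDDING_MODEL"], device="cpu"
)

With the warm-up gone, both models are fetched lazily on first use instead of being baked into the image.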
diff --git a/backend/apps/audio/main.py b/backend/apps/audio/main.py
index dd6a0caf..2faf07aa 100644
--- a/backend/apps/audio/main.py
+++ b/backend/apps/audio/main.py
@@ -1,5 +1,4 @@
 import os
-import logging
 from fastapi import (
     FastAPI,
     Request,
@@ -22,24 +21,11 @@ from utils.utils import (
 )
 from utils.misc import calculate_sha256

-from config import (
-    SRC_LOG_LEVELS,
-    CACHE_DIR,
-    UPLOAD_DIR,
-    WHISPER_MODEL,
-    WHISPER_MODEL_DIR,
-    DEVICE_TYPE,
-)
+from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL, WHISPER_MODEL_DIR, DEVICE_TYPE

-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["AUDIO"])
-
-whisper_device_type = DEVICE_TYPE
-
-if whisper_device_type != "cuda":
+if DEVICE_TYPE != "cuda":
     whisper_device_type = "cpu"

-log.info(f"whisper_device_type: {whisper_device_type}")

 app = FastAPI()
 app.add_middleware(
@@ -56,7 +42,7 @@ def transcribe(
     file: UploadFile = File(...),
     user=Depends(get_current_user),
 ):
-    log.info(f"file.content_type: {file.content_type}")
+    print(file.content_type)

     if file.content_type not in ["audio/mpeg", "audio/wav"]:
         raise HTTPException(
@@ -80,7 +66,7 @@ def transcribe(
         )

         segments, info = model.transcribe(file_path, beam_size=5)
-        log.info(
+        print(
             "Detected language '%s' with probability %f"
             % (info.language, info.language_probability)
         )
@@ -90,7 +76,7 @@ def transcribe(

         return {"text": transcript.strip()}
     except Exception as e:
-        log.exception(e)
+        print(e)

         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,

diff --git a/backend/apps/images/main.py b/backend/apps/images/main.py
index af8cbf7c..31bfc0f5 100644
--- a/backend/apps/images/main.py
+++ b/backend/apps/images/main.py
@@ -18,8 +18,6 @@ from utils.utils import (
     get_current_user,
     get_admin_user,
 )
-
-from apps.images.utils.comfyui import ImageGenerationPayload, comfyui_generate_image
 from utils.misc import calculate_sha256
 from typing import Optional
 from pydantic import BaseModel
@@ -27,14 +25,10 @@ from pathlib import Path
 import uuid
 import base64
 import json
-import logging

-from config import SRC_LOG_LEVELS, CACHE_DIR, AUTOMATIC1111_BASE_URL, COMFYUI_BASE_URL
+from config import CACHE_DIR, AUTOMATIC1111_BASE_URL

-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["IMAGES"])
-
 IMAGE_CACHE_DIR = Path(CACHE_DIR).joinpath("./image/generations/")
 IMAGE_CACHE_DIR.mkdir(parents=True, exist_ok=True)

@@ -55,8 +49,6 @@ app.state.MODEL = ""

 app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL

-app.state.COMFYUI_BASE_URL = COMFYUI_BASE_URL
-
 app.state.IMAGE_SIZE = "512x512"
 app.state.IMAGE_STEPS = 50

@@ -79,48 +71,32 @@ async def update_config(form_data: ConfigUpdateForm, user=Depends(get_admin_user
     return {"engine": app.state.ENGINE, "enabled": app.state.ENABLED}


-class EngineUrlUpdateForm(BaseModel):
-    AUTOMATIC1111_BASE_URL: Optional[str] = None
-    COMFYUI_BASE_URL: Optional[str] = None
+class UrlUpdateForm(BaseModel):
+    url: str


 @app.get("/url")
-async def get_engine_url(user=Depends(get_admin_user)):
-    return {
-        "AUTOMATIC1111_BASE_URL": app.state.AUTOMATIC1111_BASE_URL,
-        "COMFYUI_BASE_URL": app.state.COMFYUI_BASE_URL,
-    }
+async def get_automatic1111_url(user=Depends(get_admin_user)):
+    return {"AUTOMATIC1111_BASE_URL": app.state.AUTOMATIC1111_BASE_URL}


 @app.post("/url/update")
-async def update_engine_url(
-    form_data: EngineUrlUpdateForm, user=Depends(get_admin_user)
+async def update_automatic1111_url(
+    form_data: UrlUpdateForm, user=Depends(get_admin_user)
 ):
-    if form_data.AUTOMATIC1111_BASE_URL == None:
+    if form_data.url == "":
         app.state.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
     else:
-        url = form_data.AUTOMATIC1111_BASE_URL.strip("/")
+        url = form_data.url.strip("/")
         try:
             r = requests.head(url)
             app.state.AUTOMATIC1111_BASE_URL = url
         except Exception as e:
             raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))

-    if form_data.COMFYUI_BASE_URL == None:
-        app.state.COMFYUI_BASE_URL = COMFYUI_BASE_URL
-    else:
-        url = form_data.COMFYUI_BASE_URL.strip("/")
-
-        try:
-            r = requests.head(url)
-            app.state.COMFYUI_BASE_URL = url
-        except Exception as e:
-            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
-
     return {
         "AUTOMATIC1111_BASE_URL": app.state.AUTOMATIC1111_BASE_URL,
-        "COMFYUI_BASE_URL": app.state.COMFYUI_BASE_URL,
         "status": True,
     }

@@ -210,18 +186,6 @@ def get_models(user=Depends(get_current_user)):
             {"id": "dall-e-2", "name": "DALL·E 2"},
             {"id": "dall-e-3", "name": "DALL·E 3"},
         ]
-    elif app.state.ENGINE == "comfyui":
-
-        r = requests.get(url=f"{app.state.COMFYUI_BASE_URL}/object_info")
-        info = r.json()
-
-        return list(
-            map(
-                lambda model: {"id": model, "name": model},
-                info["CheckpointLoaderSimple"]["input"]["required"]["ckpt_name"][0],
-            )
-        )
-
     else:
         r = requests.get(
             url=f"{app.state.AUTOMATIC1111_BASE_URL}/sdapi/v1/sd-models"
@@ -243,8 +207,6 @@ async def get_default_model(user=Depends(get_admin_user)):
     try:
         if app.state.ENGINE == "openai":
             return {"model": app.state.MODEL if app.state.MODEL else "dall-e-2"}
-        elif app.state.ENGINE == "comfyui":
-            return {"model": app.state.MODEL if app.state.MODEL else ""}
         else:
             r = requests.get(url=f"{app.state.AUTOMATIC1111_BASE_URL}/sdapi/v1/options")
             options = r.json()
@@ -259,12 +221,10 @@ class UpdateModelForm(BaseModel):

 def set_model_handler(model: str):
+
     if app.state.ENGINE == "openai":
         app.state.MODEL = model
         return app.state.MODEL
-    if app.state.ENGINE == "comfyui":
-        app.state.MODEL = model
-        return app.state.MODEL
     else:
         r = requests.get(url=f"{app.state.AUTOMATIC1111_BASE_URL}/sdapi/v1/options")
         options = r.json()
@@ -308,24 +268,7 @@ def save_b64_image(b64_str):

         return image_id
     except Exception as e:
-        log.error(f"Error saving image: {e}")
-        return None
-
-
-def save_url_image(url):
-    image_id = str(uuid.uuid4())
-    file_path = IMAGE_CACHE_DIR.joinpath(f"{image_id}.png")
-
-    try:
-        r = requests.get(url)
-        r.raise_for_status()
-
-        with open(file_path, "wb") as image_file:
-            image_file.write(r.content)
-
-        return image_id
-    except Exception as e:
-        log.exception(f"Error saving image: {e}")
+        print(f"Error saving image: {e}")
         return None


@@ -335,8 +278,6 @@ def generate_image(
     user=Depends(get_current_user),
 ):

-    width, height = tuple(map(int, app.state.IMAGE_SIZE.split("x")))
-
     r = None
     try:
         if app.state.ENGINE == "openai":
@@ -352,7 +293,6 @@ def generate_image(
                 "size": form_data.size if form_data.size else app.state.IMAGE_SIZE,
                 "response_format": "b64_json",
             }
-
             r = requests.post(
                 url=f"https://api.openai.com/v1/images/generations",
                 json=data,
@@ -360,6 +300,7 @@ def generate_image(
             )

             r.raise_for_status()
+
             res = r.json()

             images = []
@@ -374,47 +315,12 @@ def generate_image(

             return images

-        elif app.state.ENGINE == "comfyui":
-
-            data = {
-                "prompt": form_data.prompt,
-                "width": width,
-                "height": height,
-                "n": form_data.n,
-            }
-
-            if app.state.IMAGE_STEPS != None:
-                data["steps"] = app.state.IMAGE_STEPS
-
-            if form_data.negative_prompt != None:
-                data["negative_prompt"] = form_data.negative_prompt
-
-            data = ImageGenerationPayload(**data)
-
-            res = comfyui_generate_image(
-                app.state.MODEL,
-                data,
-                user.id,
-                app.state.COMFYUI_BASE_URL,
-            )
-            log.debug(f"res: {res}")
-
-            images = []
-
-            for image in res["data"]:
-                image_id = save_url_image(image["url"])
-                images.append({"url": f"/cache/image/generations/{image_id}.png"})
-                file_body_path = IMAGE_CACHE_DIR.joinpath(f"{image_id}.json")
-
-                with open(file_body_path, "w") as f:
-                    json.dump(data.model_dump(exclude_none=True), f)
-
-            log.debug(f"images: {images}")
-            return images
         else:
             if form_data.model:
                 set_model_handler(form_data.model)

+            width, height = tuple(map(int, app.state.IMAGE_SIZE.split("x")))
+
             data = {
                 "prompt": form_data.prompt,
                 "batch_size": form_data.n,
@@ -435,7 +341,7 @@ def generate_image(

             res = r.json()

-            log.debug(f"res: {res}")
+            print(res)

             images = []

@@ -450,10 +356,7 @@ def generate_image(

             return images
     except Exception as e:
-        error = e
-
-        if r != None:
-            data = r.json()
-            if "error" in data:
-                error = data["error"]["message"]
-        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(error))
+        print(e)
+        if r:
+            print(r.json())
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))

diff --git a/backend/apps/images/utils/comfyui.py b/backend/apps/images/utils/comfyui.py
deleted file mode 100644
index 39333325..00000000
--- a/backend/apps/images/utils/comfyui.py
+++ /dev/null
@@ -1,234 +0,0 @@
-import websocket  # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
-import uuid
-import json
-import urllib.request
-import urllib.parse
-import random
-import logging
-
-from config import SRC_LOG_LEVELS
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["COMFYUI"])
-
-from pydantic import BaseModel
-
-from typing import Optional
-
-COMFYUI_DEFAULT_PROMPT = """
-{
-  "3": {
-    "inputs": {
-      "seed": 0,
-      "steps": 20,
-      "cfg": 8,
-      "sampler_name": "euler",
-      "scheduler": "normal",
-      "denoise": 1,
-      "model": [
-        "4",
-        0
-      ],
-      "positive": [
-        "6",
-        0
-      ],
-      "negative": [
-        "7",
-        0
-      ],
-      "latent_image": [
-        "5",
-        0
-      ]
-    },
-    "class_type": "KSampler",
-    "_meta": {
-      "title": "KSampler"
-    }
-  },
-  "4": {
-    "inputs": {
-      "ckpt_name": "model.safetensors"
-    },
-    "class_type": "CheckpointLoaderSimple",
-    "_meta": {
-      "title": "Load Checkpoint"
-    }
-  },
-  "5": {
-    "inputs": {
-      "width": 512,
-      "height": 512,
-      "batch_size": 1
-    },
-    "class_type": "EmptyLatentImage",
-    "_meta": {
-      "title": "Empty Latent Image"
-    }
-  },
-  "6": {
-    "inputs": {
-      "text": "Prompt",
-      "clip": [
-        "4",
-        1
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Prompt)"
-    }
-  },
-  "7": {
-    "inputs": {
-      "text": "Negative Prompt",
-      "clip": [
-        "4",
-        1
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Prompt)"
-    }
-  },
-  "8": {
-    "inputs": {
-      "samples": [
-        "3",
-        0
-      ],
-      "vae": [
-        "4",
-        2
-      ]
-    },
-    "class_type": "VAEDecode",
-    "_meta": {
-      "title": "VAE Decode"
-    }
-  },
-  "9": {
-    "inputs": {
-      "filename_prefix": "ComfyUI",
-      "images": [
-        "8",
-        0
-      ]
-    },
-    "class_type": "SaveImage",
-    "_meta": {
-      "title": "Save Image"
-    }
-  }
-}
-"""
-
-
-def queue_prompt(prompt, client_id, base_url):
-    log.info("queue_prompt")
-    p = {"prompt": prompt, "client_id": client_id}
-    data = json.dumps(p).encode("utf-8")
-    req = urllib.request.Request(f"{base_url}/prompt", data=data)
-    return json.loads(urllib.request.urlopen(req).read())
-
-
-def get_image(filename, subfolder, folder_type, base_url):
-    log.info("get_image")
-    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
-    url_values = urllib.parse.urlencode(data)
-    with urllib.request.urlopen(f"{base_url}/view?{url_values}") as response:
-        return response.read()
-
-
-def get_image_url(filename, subfolder, folder_type, base_url):
-    log.info("get_image")
-    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
-    url_values = urllib.parse.urlencode(data)
-    return f"{base_url}/view?{url_values}"
-
-
-def get_history(prompt_id, base_url):
-    log.info("get_history")
-    with urllib.request.urlopen(f"{base_url}/history/{prompt_id}") as response:
-        return json.loads(response.read())
-
-
-def get_images(ws, prompt, client_id, base_url):
-    prompt_id = queue_prompt(prompt, client_id, base_url)["prompt_id"]
-    output_images = []
-    while True:
-        out = ws.recv()
-        if isinstance(out, str):
-            message = json.loads(out)
-            if message["type"] == "executing":
-                data = message["data"]
-                if data["node"] is None and data["prompt_id"] == prompt_id:
-                    break  # Execution is done
-        else:
-            continue  # previews are binary data
-
-    history = get_history(prompt_id, base_url)[prompt_id]
-    for o in history["outputs"]:
-        for node_id in history["outputs"]:
-            node_output = history["outputs"][node_id]
-            if "images" in node_output:
-                for image in node_output["images"]:
-                    url = get_image_url(
-                        image["filename"], image["subfolder"], image["type"], base_url
-                    )
-                    output_images.append({"url": url})
-    return {"data": output_images}
-
-
-class ImageGenerationPayload(BaseModel):
-    prompt: str
-    negative_prompt: Optional[str] = ""
-    steps: Optional[int] = None
-    seed: Optional[int] = None
-    width: int
-    height: int
-    n: int = 1
-
-
-def comfyui_generate_image(
-    model: str, payload: ImageGenerationPayload, client_id, base_url
-):
-    host = base_url.replace("http://", "").replace("https://", "")
-
-    comfyui_prompt = json.loads(COMFYUI_DEFAULT_PROMPT)
-
-    comfyui_prompt["4"]["inputs"]["ckpt_name"] = model
-    comfyui_prompt["5"]["inputs"]["batch_size"] = payload.n
-    comfyui_prompt["5"]["inputs"]["width"] = payload.width
-    comfyui_prompt["5"]["inputs"]["height"] = payload.height
-
-    # set the text prompt for our positive CLIPTextEncode
-    comfyui_prompt["6"]["inputs"]["text"] = payload.prompt
-    comfyui_prompt["7"]["inputs"]["text"] = payload.negative_prompt
-
-    if payload.steps:
-        comfyui_prompt["3"]["inputs"]["steps"] = payload.steps
-
-    comfyui_prompt["3"]["inputs"]["seed"] = (
-        payload.seed if payload.seed else random.randint(0, 18446744073709551614)
-    )
-
-    try:
-        ws = websocket.WebSocket()
-        ws.connect(f"ws://{host}/ws?clientId={client_id}")
-        log.info("WebSocket connection established.")
-    except Exception as e:
-        log.exception(f"Failed to connect to WebSocket server: {e}")
-        return None
-
-    try:
-        images = get_images(ws, comfyui_prompt, client_id, base_url)
-    except Exception as e:
-        log.exception(f"Error while receiving images: {e}")
-        images = None
-
-    ws.close()
-
-    return images

diff --git a/backend/apps/litellm/main.py b/backend/apps/litellm/main.py
index a9922aad..21b9e58a 100644
--- a/backend/apps/litellm/main.py
+++ b/backend/apps/litellm/main.py
@@ -1,27 +1,10 @@
-import logging
-
 from litellm.proxy.proxy_server import ProxyConfig, initialize
 from litellm.proxy.proxy_server import app

-from fastapi import FastAPI, Request, Depends, status, Response
+from fastapi import FastAPI, Request, Depends, status
 from fastapi.responses import JSONResponse
-
-from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
-from starlette.responses import StreamingResponse
-import json
-
 from utils.utils import get_http_authorization_cred, get_current_user
-from config import SRC_LOG_LEVELS, ENV
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["LITELLM"])
-
-
-from config import (
-    MODEL_FILTER_ENABLED,
-    MODEL_FILTER_LIST,
-)
-
+from config import ENV

 proxy_config = ProxyConfig()

@@ -43,58 +26,16 @@ async def on_startup():
     await startup()


-app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
-app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST
-
-
 @app.middleware("http")
 async def auth_middleware(request: Request, call_next):
     auth_header = request.headers.get("Authorization", "")
-    request.state.user = None

-    try:
-        user = get_current_user(get_http_authorization_cred(auth_header))
-        log.debug(f"user: {user}")
-        request.state.user = user
-    except Exception as e:
-        return JSONResponse(status_code=400, content={"detail": str(e)})
+    if ENV != "dev":
+        try:
+            user = get_current_user(get_http_authorization_cred(auth_header))
+            print(user)
+        except Exception as e:
+            return JSONResponse(status_code=400, content={"detail": str(e)})

     response = await call_next(request)
     return response
-
-
-class ModifyModelsResponseMiddleware(BaseHTTPMiddleware):
-    async def dispatch(
-        self, request: Request, call_next: RequestResponseEndpoint
-    ) -> Response:
-
-        response = await call_next(request)
-        user = request.state.user
-
-        if "/models" in request.url.path:
-            if isinstance(response, StreamingResponse):
-                # Read the content of the streaming response
-                body = b""
-                async for chunk in response.body_iterator:
-                    body += chunk
-
-                data = json.loads(body.decode("utf-8"))
-
-                if app.state.MODEL_FILTER_ENABLED:
-                    if user and user.role == "user":
-                        data["data"] = list(
-                            filter(
-                                lambda model: model["id"]
-                                in app.state.MODEL_FILTER_LIST,
-                                data["data"],
-                            )
-                        )
-
-                # Modified Flag
-                data["modified"] = True
-                return JSONResponse(content=data)
-
-        return response
-
-
-app.add_middleware(ModifyModelsResponseMiddleware)

diff --git a/backend/apps/ollama/main.py b/backend/apps/ollama/main.py
index 818b53d9..5ecbaa29 100644
--- a/backend/apps/ollama/main.py
+++ b/backend/apps/ollama/main.py
@@ -1,49 +1,24 @@
-from fastapi import (
-    FastAPI,
-    Request,
-    Response,
-    HTTPException,
-    Depends,
-    status,
-    UploadFile,
-    File,
-    BackgroundTasks,
-)
+from fastapi import FastAPI, Request, Response, HTTPException, Depends, status
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse
 from fastapi.concurrency import run_in_threadpool

 from pydantic import BaseModel, ConfigDict

-import os
-import copy
 import random
 import requests
 import json
 import uuid
 import aiohttp
 import asyncio
-import logging
-from urllib.parse import urlparse
-from typing import Optional, List, Union
-
 from apps.web.models.users import Users
 from constants import ERROR_MESSAGES
 from utils.utils import decode_token, get_current_user, get_admin_user
+from config import OLLAMA_BASE_URLS, MODEL_FILTER_ENABLED, MODEL_FILTER_LIST
+from typing import Optional, List, Union

-from config import (
-    SRC_LOG_LEVELS,
-    OLLAMA_BASE_URLS,
-    MODEL_FILTER_ENABLED,
-    MODEL_FILTER_LIST,
-    UPLOAD_DIR,
-)
-from utils.misc import calculate_sha256
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["OLLAMA"])

 app = FastAPI()
 app.add_middleware(
@@ -94,7 +69,7 @@ class UrlUpdateForm(BaseModel):
 async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
     app.state.OLLAMA_BASE_URLS = form_data.urls

-    log.info(f"app.state.OLLAMA_BASE_URLS: {app.state.OLLAMA_BASE_URLS}")
+    print(app.state.OLLAMA_BASE_URLS)

     return {"OLLAMA_BASE_URLS": app.state.OLLAMA_BASE_URLS}

@@ -115,7 +90,7 @@ async def fetch_url(url):
             return await response.json()
     except Exception as e:
         # Handle connection error here
-        log.error(f"Connection error: {e}")
+        print(f"Connection error: {e}")
         return None


@@ -123,14 +98,13 @@ def merge_models_lists(model_lists):
     merged_models = {}

     for idx, model_list in enumerate(model_lists):
-        if model_list is not None:
-            for model in model_list:
-                digest = model["digest"]
-                if digest not in merged_models:
-                    model["urls"] = [idx]
-                    merged_models[digest] = model
-                else:
-                    merged_models[digest]["urls"].append(idx)
+        for model in model_list:
+            digest = model["digest"]
+            if digest not in merged_models:
+                model["urls"] = [idx]
+                merged_models[digest] = model
+            else:
+                merged_models[digest]["urls"].append(idx)

     return list(merged_models.values())

@@ -139,16 +113,16 @@ def merge_models_lists(model_lists):

 async def get_all_models():
-    log.info("get_all_models()")
+    print("get_all_models")

     tasks = [fetch_url(f"{url}/api/tags") for url in app.state.OLLAMA_BASE_URLS]
     responses = await asyncio.gather(*tasks)
+    responses = list(filter(lambda x: x is not None, responses))

     models = {
         "models": merge_models_lists(
-            map(lambda response: response["models"] if response else None, responses)
+            map(lambda response: response["models"], responses)
         )
     }
-
     app.state.MODELS = {model["model"]: model for model in models["models"]}

     return models

@@ -180,7 +154,7 @@ async def get_ollama_tags(
         return r.json()
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -207,17 +181,11 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
         responses = await asyncio.gather(*tasks)
         responses = list(filter(lambda x: x is not None, responses))

-        if len(responses) > 0:
-            lowest_version = min(
-                responses, key=lambda x: tuple(map(int, x["version"].split(".")))
-            )
+        lowest_version = min(
+            responses, key=lambda x: tuple(map(int, x["version"].split(".")))
+        )

-            return {"version": lowest_version["version"]}
-        else:
-            raise HTTPException(
-                status_code=500,
-                detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
-            )
+        return {"version": lowest_version["version"]}
     else:
         url = app.state.OLLAMA_BASE_URLS[url_idx]
         try:
@@ -226,7 +194,7 @@ async def get_ollama_versions(url_idx: Optional[int] = None):

             return r.json()
         except Exception as e:
-            log.exception(e)
+            print(e)
             error_detail = "Open WebUI: Server Connection Error"
             if r is not None:
                 try:
@@ -252,33 +220,18 @@ async def pull_model(
     form_data: ModelNameForm, url_idx: int = 0, user=Depends(get_admin_user)
 ):
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     r = None

     def get_request():
         nonlocal url
         nonlocal r
-
-        request_id = str(uuid.uuid4())
         try:
-            REQUEST_POOL.append(request_id)

             def stream_content():
-                try:
-                    yield json.dumps({"id": request_id, "done": False}) + "\n"
-
-                    for chunk in r.iter_content(chunk_size=8192):
-                        if request_id in REQUEST_POOL:
-                            yield chunk
-                        else:
-                            log.warning("User: canceled request")
-                            break
-                finally:
-                    if hasattr(r, "close"):
-                        r.close()
-                        if request_id in REQUEST_POOL:
-                            REQUEST_POOL.remove(request_id)
+                for chunk in r.iter_content(chunk_size=8192):
+                    yield chunk

             r = requests.request(
                 method="POST",
@@ -299,9 +252,8 @@ async def pull_model(

     try:
         return await run_in_threadpool(get_request)
-
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -340,7 +292,7 @@ async def push_model(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.debug(f"url: {url}")
+    print(url)

     r = None

@@ -372,7 +324,7 @@ async def push_model(
     try:
         return await run_in_threadpool(get_request)
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -400,9 +352,9 @@ class CreateModelForm(BaseModel):
 async def create_model(
     form_data: CreateModelForm, url_idx: int = 0, user=Depends(get_admin_user)
 ):
-    log.debug(f"form_data: {form_data}")
+    print(form_data)
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     r = None

@@ -424,7 +376,7 @@ async def create_model(

             r.raise_for_status()

-            log.debug(f"r: {r}")
+            print(r)

             return StreamingResponse(
                 stream_content(),
@@ -437,7 +389,7 @@ async def create_model(
     try:
         return await run_in_threadpool(get_request)
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -475,7 +427,7 @@ async def copy_model(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     try:
         r = requests.request(
@@ -485,11 +437,11 @@ async def copy_model(
         )
         r.raise_for_status()

-        log.debug(f"r.text: {r.text}")
+        print(r.text)

         return True
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -522,7 +474,7 @@ async def delete_model(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     try:
         r = requests.request(
@@ -532,11 +484,11 @@ async def delete_model(
         )
         r.raise_for_status()

-        log.debug(f"r.text: {r.text}")
+        print(r.text)

         return True
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -562,7 +514,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_use
     url_idx = random.choice(app.state.MODELS[form_data.name]["urls"])
     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     try:
         r = requests.request(
@@ -574,7 +526,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_use

         return r.json()
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -614,7 +566,7 @@ async def generate_embeddings(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     try:
         r = requests.request(
@@ -626,7 +578,7 @@ async def generate_embeddings(

         return r.json()
     except Exception as e:
-        log.exception(e)
+        print(e)
         error_detail = "Open WebUI: Server Connection Error"
         if r is not None:
             try:
@@ -670,11 +622,11 @@ async def generate_completion(
         else:
             raise HTTPException(
                 status_code=400,
-                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
+                detail="error_detail",
             )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     r = None

@@ -695,7 +647,7 @@ async def generate_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            log.warning("User: canceled request")
+                            print("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -750,7 +702,7 @@ class GenerateChatCompletionForm(BaseModel):
     format: Optional[str] = None
     options: Optional[dict] = None
     template: Optional[str] = None
-    stream: Optional[bool] = None
+    stream: Optional[bool] = True
     keep_alive: Optional[Union[int, str]] = None


@@ -772,15 +724,11 @@ async def generate_chat_completion(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     r = None

-    log.debug(
-        "form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
-            form_data.model_dump_json(exclude_none=True).encode()
-        )
-    )
+    print(form_data.model_dump_json(exclude_none=True).encode())

     def get_request():
         nonlocal form_data
@@ -799,7 +747,7 @@ async def generate_chat_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            log.warning("User: canceled request")
+                            print("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -822,7 +770,7 @@ async def generate_chat_completion(
                 headers=dict(r.headers),
             )
         except Exception as e:
-            log.exception(e)
+            print(e)
             raise e

     try:
@@ -876,7 +824,7 @@ async def generate_openai_chat_completion(
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
-    log.info(f"url: {url}")
+    print(url)

     r = None

@@ -899,7 +847,7 @@ async def generate_openai_chat_completion(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            log.warning("User: canceled request")
+                            print("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -942,220 +890,6 @@ async def generate_openai_chat_completion(
     )


-class UrlForm(BaseModel):
-    url: str
-
-
-class UploadBlobForm(BaseModel):
-    filename: str
-
-
-def parse_huggingface_url(hf_url):
-    try:
-        # Parse the URL
-        parsed_url = urlparse(hf_url)
-
-        # Get the path and split it into components
-        path_components = parsed_url.path.split("/")
-
-        # Extract the desired output
-        user_repo = "/".join(path_components[1:3])
-        model_file = path_components[-1]
-
-        return model_file
-    except ValueError:
-        return None
-
-
-async def download_file_stream(
-    ollama_url, file_url, file_path, file_name, chunk_size=1024 * 1024
-):
-    done = False
-
-    if os.path.exists(file_path):
-        current_size = os.path.getsize(file_path)
-    else:
-        current_size = 0
-
-    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}
-
-    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout
-
-    async with aiohttp.ClientSession(timeout=timeout) as session:
-        async with session.get(file_url, headers=headers) as response:
-            total_size = int(response.headers.get("content-length", 0)) + current_size
-
-            with open(file_path, "ab+") as file:
-                async for data in response.content.iter_chunked(chunk_size):
-                    current_size += len(data)
-                    file.write(data)
-
-                    done = current_size == total_size
-                    progress = round((current_size / total_size) * 100, 2)
-
-                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'
-
-                if done:
-                    file.seek(0)
-                    hashed = calculate_sha256(file)
-                    file.seek(0)
-
-                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
-                    response = requests.post(url, data=file)
-
-                    if response.ok:
-                        res = {
-                            "done": done,
-                            "blob": f"sha256:{hashed}",
-                            "name": file_name,
-                        }
-                        os.remove(file_path)
-
-                        yield f"data: {json.dumps(res)}\n\n"
-                    else:
-                        raise "Ollama: Could not create blob, Please try again."
-
-
-# def number_generator():
-#     for i in range(1, 101):
-#         yield f"data: {i}\n"
-
-
-# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
-@app.post("/models/download")
-@app.post("/models/download/{url_idx}")
-async def download_model(
-    form_data: UrlForm,
-    url_idx: Optional[int] = None,
-):
-
-    allowed_hosts = ["https://huggingface.co/", "https://github.com/"]
-
-    if not any(form_data.url.startswith(host) for host in allowed_hosts):
-        raise HTTPException(
-            status_code=400,
-            detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
-        )
-
-    if url_idx == None:
-        url_idx = 0
-    url = app.state.OLLAMA_BASE_URLS[url_idx]
-
-    file_name = parse_huggingface_url(form_data.url)
-
-    if file_name:
-        file_path = f"{UPLOAD_DIR}/{file_name}"
-
-        return StreamingResponse(
-            download_file_stream(url, form_data.url, file_path, file_name),
-        )
-    else:
-        return None
-
-
-@app.post("/models/upload")
-@app.post("/models/upload/{url_idx}")
-def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
-    if url_idx == None:
-        url_idx = 0
-    ollama_url = app.state.OLLAMA_BASE_URLS[url_idx]
-
-    file_path = f"{UPLOAD_DIR}/{file.filename}"
-
-    # Save file in chunks
-    with open(file_path, "wb+") as f:
-        for chunk in file.file:
-            f.write(chunk)
-
-    def file_process_stream():
-        nonlocal ollama_url
-        total_size = os.path.getsize(file_path)
-        chunk_size = 1024 * 1024
-        try:
-            with open(file_path, "rb") as f:
-                total = 0
-                done = False
-
-                while not done:
-                    chunk = f.read(chunk_size)
-                    if not chunk:
-                        done = True
-                        continue
-
-                    total += len(chunk)
-                    progress = round((total / total_size) * 100, 2)
-
-                    res = {
-                        "progress": progress,
-                        "total": total_size,
-                        "completed": total,
-                    }
-                    yield f"data: {json.dumps(res)}\n\n"
-
-                if done:
-                    f.seek(0)
-                    hashed = calculate_sha256(f)
-                    f.seek(0)
-
-                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
-                    response = requests.post(url, data=f)
-
-                    if response.ok:
-                        res = {
-                            "done": done,
-                            "blob": f"sha256:{hashed}",
-                            "name": file.filename,
-                        }
-                        os.remove(file_path)
-                        yield f"data: {json.dumps(res)}\n\n"
-                    else:
-                        raise Exception(
-                            "Ollama: Could not create blob, Please try again."
-                        )
-
-        except Exception as e:
-            res = {"error": str(e)}
-            yield f"data: {json.dumps(res)}\n\n"
-
-    return StreamingResponse(file_process_stream(), media_type="text/event-stream")
-
-
-# async def upload_model(file: UploadFile = File(), url_idx: Optional[int] = None):
-#     if url_idx == None:
-#         url_idx = 0
-#     url = app.state.OLLAMA_BASE_URLS[url_idx]
-
-#     file_location = os.path.join(UPLOAD_DIR, file.filename)
-#     total_size = file.size
-
-#     async def file_upload_generator(file):
-#         print(file)
-#         try:
-#             async with aiofiles.open(file_location, "wb") as f:
-#                 completed_size = 0
-#                 while True:
-#                     chunk = await file.read(1024*1024)
-#                     if not chunk:
-#                         break
-#                     await f.write(chunk)
-#                     completed_size += len(chunk)
-#                     progress = (completed_size / total_size) * 100
-
-#                     print(progress)
-#                     yield f'data: {json.dumps({"status": "uploading", "percentage": progress, "total": total_size, "completed": completed_size, "done": False})}\n'
-#         except Exception as e:
-#             print(e)
-#             yield f"data: {json.dumps({'status': 'error', 'message': str(e)})}\n"
-#         finally:
-#             await file.close()
-#             print("done")
-#             yield f'data: {json.dumps({"status": "completed", "percentage": 100, "total": total_size, "completed": completed_size, "done": True})}\n'
-
-#     return StreamingResponse(
-#         file_upload_generator(copy.deepcopy(file)), media_type="text/event-stream"
-#     )
-
-
 @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
 async def deprecated_proxy(path: str, request: Request, user=Depends(get_current_user)):
     url = app.state.OLLAMA_BASE_URLS[0]
@@ -1206,7 +940,7 @@ async def deprecated_proxy(path: str, request: Request, user=Depends(get_current
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            log.warning("User: canceled request")
+                            print("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):

diff --git a/backend/apps/openai/main.py b/backend/apps/openai/main.py
index 4098d73a..375ed3f1 100644
--- a/backend/apps/openai/main.py
+++ b/backend/apps/openai/main.py
@@ -6,7 +6,6 @@ import requests
 import aiohttp
 import asyncio
 import json
-import logging

 from pydantic import BaseModel

@@ -20,7 +19,6 @@ from utils.utils import (
     get_admin_user,
 )
 from config import (
-    SRC_LOG_LEVELS,
     OPENAI_API_BASE_URLS,
     OPENAI_API_KEYS,
     CACHE_DIR,
@@ -33,9 +31,6 @@ from typing import List, Optional
 import hashlib
 from pathlib import Path

-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["OPENAI"])
-
 app = FastAPI()
 app.add_middleware(
     CORSMiddleware,
@@ -116,7 +111,6 @@ async def speech(request: Request, user=Depends(get_verified_user)):
         headers["Authorization"] = f"Bearer {app.state.OPENAI_API_KEYS[idx]}"
         headers["Content-Type"] = "application/json"

-        r = None
         try:
             r = requests.post(
                 url=f"{app.state.OPENAI_API_BASE_URLS[idx]}/audio/speech",
@@ -139,7 +133,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):

             return FileResponse(file_path)

         except Exception as e:
-            log.exception(e)
+            print(e)
             error_detail = "Open WebUI: Server Connection Error"
             if r is not None:
                 try:
@@ -149,9 +143,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
                 except:
                     error_detail = f"External: {e}"

-            raise HTTPException(
-                status_code=r.status_code if r else 500, detail=error_detail
-            )
+            raise HTTPException(status_code=r.status_code, detail=error_detail)

     except ValueError:
         raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND)
@@ -165,7 +157,7 @@ async def fetch_url(url, key):
             return await response.json()
     except Exception as e:
         # Handle connection error here
-        log.error(f"Connection error: {e}")
+        print(f"Connection error: {e}")
         return None


@@ -173,21 +165,20 @@ def merge_models_lists(model_lists):
     merged_list = []

     for idx, models in enumerate(model_lists):
-        if models is not None and "error" not in models:
-            merged_list.extend(
-                [
-                    {**model, "urlIdx": idx}
-                    for model in models
-                    if "api.openai.com" not in app.state.OPENAI_API_BASE_URLS[idx]
-                    or "gpt" in model["id"]
-                ]
-            )
+        merged_list.extend(
+            [
+                {**model, "urlIdx": idx}
+                for model in models
+                if "api.openai.com" not in app.state.OPENAI_API_BASE_URLS[idx]
+                or "gpt" in model["id"]
+            ]
+        )

     return merged_list


 async def get_all_models():
-    log.info("get_all_models()")
+    print("get_all_models")

     if len(app.state.OPENAI_API_KEYS) == 1 and app.state.OPENAI_API_KEYS[0] == "":
         models = {"data": []}
@@ -196,24 +187,15 @@ async def get_all_models():
             fetch_url(f"{url}/models", app.state.OPENAI_API_KEYS[idx])
             for idx, url in enumerate(app.state.OPENAI_API_BASE_URLS)
         ]
-
         responses = await asyncio.gather(*tasks)
+        responses = list(
+            filter(lambda x: x is not None and "error" not in x, responses)
+        )
         models = {
             "data": merge_models_lists(
-                list(
-                    map(
-                        lambda response: (
-                            response["data"]
-                            if response and "data" in response
-                            else None
-                        ),
-                        responses,
-                    )
-                )
+                list(map(lambda response: response["data"], responses))
             )
         }
-
-        log.info(f"models: {models}")
         app.state.MODELS = {model["id"]: model for model in models["data"]}

         return models
@@ -236,9 +218,6 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use
         return models
     else:
         url = app.state.OPENAI_API_BASE_URLS[url_idx]
-
-        r = None
-
         try:
             r = requests.request(method="GET", url=f"{url}/models")
             r.raise_for_status()
@@ -251,7 +230,7 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use

             return response_data
         except Exception as e:
-            log.exception(e)
+            print(e)
error_detail = "Open WebUI: Server Connection Error" if r is not None: try: @@ -285,7 +264,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)): if body.get("model") == "gpt-4-vision-preview": if "max_tokens" not in body: body["max_tokens"] = 4000 - log.debug("Modified body_dict:", body) + print("Modified body_dict:", body) # Fix for ChatGPT calls failing because the num_ctx key is in body if "num_ctx" in body: @@ -297,7 +276,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)): # Convert the modified body back to JSON body = json.dumps(body) except json.JSONDecodeError as e: - log.error("Error loading request body into a dictionary:", e) + print("Error loading request body into a dictionary:", e) url = app.state.OPENAI_API_BASE_URLS[idx] key = app.state.OPENAI_API_KEYS[idx] @@ -311,8 +290,6 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)): headers["Authorization"] = f"Bearer {key}" headers["Content-Type"] = "application/json" - r = None - try: r = requests.request( method=request.method, @@ -335,7 +312,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)): response_data = r.json() return response_data except Exception as e: - log.exception(e) + print(e) error_detail = "Open WebUI: Server Connection Error" if r is not None: try: @@ -345,6 +322,4 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)): except: error_detail = f"External: {e}" - raise HTTPException( - status_code=r.status_code if r else 500, detail=error_detail - ) + raise HTTPException(status_code=r.status_code, detail=error_detail) diff --git a/backend/apps/rag/main.py b/backend/apps/rag/main.py index f49e991e..82d2d28b 100644 --- a/backend/apps/rag/main.py +++ b/backend/apps/rag/main.py @@ -8,7 +8,7 @@ from fastapi import ( Form, ) from fastapi.middleware.cors import CORSMiddleware -import os, shutil, logging +import os, shutil from pathlib import Path from typing import List @@ -21,7 +21,6 @@ from langchain_community.document_loaders import ( TextLoader, PyPDFLoader, CSVLoader, - BSHTMLLoader, Docx2txtLoader, UnstructuredEPubLoader, UnstructuredWordDocumentLoader, @@ -55,7 +54,6 @@ from utils.misc import ( ) from utils.utils import get_current_user, get_admin_user from config import ( - SRC_LOG_LEVELS, UPLOAD_DIR, DOCS_DIR, RAG_EMBEDDING_MODEL, @@ -68,9 +66,6 @@ from config import ( from constants import ERROR_MESSAGES -log = logging.getLogger(__name__) -log.setLevel(SRC_LOG_LEVELS["RAG"]) - # # if RAG_EMBEDDING_MODEL: # sentence_transformer_ef = SentenceTransformer( @@ -116,6 +111,39 @@ class StoreWebForm(CollectionNameForm): url: str +def store_data_in_vector_db(data, collection_name, overwrite: bool = False) -> bool: + text_splitter = RecursiveCharacterTextSplitter( + chunk_size=app.state.CHUNK_SIZE, chunk_overlap=app.state.CHUNK_OVERLAP + ) + docs = text_splitter.split_documents(data) + + texts = [doc.page_content for doc in docs] + metadatas = [doc.metadata for doc in docs] + + try: + if overwrite: + for collection in CHROMA_CLIENT.list_collections(): + if collection_name == collection.name: + print(f"deleting existing collection {collection_name}") + CHROMA_CLIENT.delete_collection(name=collection_name) + + collection = CHROMA_CLIENT.create_collection( + name=collection_name, + embedding_function=app.state.sentence_transformer_ef, + ) + + collection.add( + documents=texts, metadatas=metadatas, ids=[str(uuid.uuid1()) for _ in texts] + ) + return True + except Exception as 
diff --git a/backend/apps/rag/main.py b/backend/apps/rag/main.py
index f49e991e..82d2d28b 100644
--- a/backend/apps/rag/main.py
+++ b/backend/apps/rag/main.py
@@ -8,7 +8,7 @@ from fastapi import (
     Form,
 )
 from fastapi.middleware.cors import CORSMiddleware
-import os, shutil, logging
+import os, shutil
 
 from pathlib import Path
 from typing import List
@@ -21,7 +21,6 @@ from langchain_community.document_loaders import (
     TextLoader,
     PyPDFLoader,
     CSVLoader,
-    BSHTMLLoader,
     Docx2txtLoader,
     UnstructuredEPubLoader,
     UnstructuredWordDocumentLoader,
@@ -55,7 +54,6 @@ from utils.misc import (
 )
 from utils.utils import get_current_user, get_admin_user
 from config import (
-    SRC_LOG_LEVELS,
     UPLOAD_DIR,
     DOCS_DIR,
     RAG_EMBEDDING_MODEL,
@@ -68,9 +66,6 @@ from config import (
 
 from constants import ERROR_MESSAGES
 
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["RAG"])
-
 #
 # if RAG_EMBEDDING_MODEL:
 #     sentence_transformer_ef = SentenceTransformer(
@@ -116,6 +111,39 @@ class StoreWebForm(CollectionNameForm):
     url: str
 
 
+def store_data_in_vector_db(data, collection_name, overwrite: bool = False) -> bool:
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=app.state.CHUNK_SIZE, chunk_overlap=app.state.CHUNK_OVERLAP
+    )
+    docs = text_splitter.split_documents(data)
+
+    texts = [doc.page_content for doc in docs]
+    metadatas = [doc.metadata for doc in docs]
+
+    try:
+        if overwrite:
+            for collection in CHROMA_CLIENT.list_collections():
+                if collection_name == collection.name:
+                    print(f"deleting existing collection {collection_name}")
+                    CHROMA_CLIENT.delete_collection(name=collection_name)
+
+        collection = CHROMA_CLIENT.create_collection(
+            name=collection_name,
+            embedding_function=app.state.sentence_transformer_ef,
+        )
+
+        collection.add(
+            documents=texts, metadatas=metadatas, ids=[str(uuid.uuid1()) for _ in texts]
+        )
+        return True
+    except Exception as e:
+        print(e)
+        if e.__class__.__name__ == "UniqueConstraintError":
+            return True
+
+        return False
+
+
 @app.get("/")
 async def get_status():
     return {
@@ -245,7 +273,7 @@ def query_doc_handler(
             embedding_function=app.state.sentence_transformer_ef,
         )
     except Exception as e:
-        log.exception(e)
+        print(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(e),
@@ -289,69 +317,13 @@ def store_web(form_data: StoreWebForm, user=Depends(get_current_user)):
             "filename": form_data.url,
         }
     except Exception as e:
-        log.exception(e)
+        print(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(e),
         )
 
 
-def store_data_in_vector_db(data, collection_name, overwrite: bool = False) -> bool:
-
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=app.state.CHUNK_SIZE,
-        chunk_overlap=app.state.CHUNK_OVERLAP,
-        add_start_index=True,
-    )
-    docs = text_splitter.split_documents(data)
-
-    if len(docs) > 0:
-        return store_docs_in_vector_db(docs, collection_name, overwrite), None
-    else:
-        raise ValueError(ERROR_MESSAGES.EMPTY_CONTENT)
-
-
-def store_text_in_vector_db(
-    text, metadata, collection_name, overwrite: bool = False
-) -> bool:
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=app.state.CHUNK_SIZE,
-        chunk_overlap=app.state.CHUNK_OVERLAP,
-        add_start_index=True,
-    )
-    docs = text_splitter.create_documents([text], metadatas=[metadata])
-    return store_docs_in_vector_db(docs, collection_name, overwrite)
-
-
-def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> bool:
-
-    texts = [doc.page_content for doc in docs]
-    metadatas = [doc.metadata for doc in docs]
-
-    try:
-        if overwrite:
-            for collection in CHROMA_CLIENT.list_collections():
-                if collection_name == collection.name:
-                    log.info(f"deleting existing collection {collection_name}")
-                    CHROMA_CLIENT.delete_collection(name=collection_name)
-
-        collection = CHROMA_CLIENT.create_collection(
-            name=collection_name,
-            embedding_function=app.state.sentence_transformer_ef,
-        )
-
-        collection.add(
-            documents=texts, metadatas=metadatas, ids=[str(uuid.uuid1()) for _ in texts]
-        )
-        return True
-    except Exception as e:
-        log.exception(e)
-        if e.__class__.__name__ == "UniqueConstraintError":
-            return True
-
-        return False
-
-
 def get_loader(filename: str, file_content_type: str, file_path: str):
     file_ext = filename.split(".")[-1].lower()
     known_type = True
@@ -409,8 +381,6 @@ def get_loader(filename: str, file_content_type: str, file_path: str):
         loader = UnstructuredRSTLoader(file_path, mode="elements")
     elif file_ext == "xml":
         loader = UnstructuredXMLLoader(file_path)
-    elif file_ext in ["htm", "html"]:
-        loader = BSHTMLLoader(file_path, open_encoding="unicode_escape")
     elif file_ext == "md":
         loader = UnstructuredMarkdownLoader(file_path)
     elif file_content_type == "application/epub+zip":
@@ -429,9 +399,9 @@ def get_loader(filename: str, file_content_type: str, file_path: str):
     elif file_ext in known_source_ext or (
         file_content_type and file_content_type.find("text/") >= 0
     ):
-        loader = TextLoader(file_path, autodetect_encoding=True)
+        loader = TextLoader(file_path)
     else:
-        loader = TextLoader(file_path, autodetect_encoding=True)
+        loader = TextLoader(file_path)
         known_type = False
 
     return loader, known_type
@@ -445,7 +415,7 @@ def store_doc(
 ):
     # "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm"
 
-    log.info(f"file.content_type: {file.content_type}")
+    print(file.content_type)
     try:
         filename = file.filename
         file_path = f"{UPLOAD_DIR}/{filename}"
@@ -461,24 +431,22 @@ def store_doc(
         loader, known_type = get_loader(file.filename, file.content_type, file_path)
         data = loader.load()
+        result = store_data_in_vector_db(data, collection_name)
 
-        try:
-            result = store_data_in_vector_db(data, collection_name)
-
-            if result:
-                return {
-                    "status": True,
-                    "collection_name": collection_name,
-                    "filename": filename,
-                    "known_type": known_type,
-                }
-        except Exception as e:
+        if result:
+            return {
+                "status": True,
+                "collection_name": collection_name,
+                "filename": filename,
+                "known_type": known_type,
+            }
+        else:
             raise HTTPException(
                 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-                detail=e,
+                detail=ERROR_MESSAGES.DEFAULT(),
             )
     except Exception as e:
-        log.exception(e)
+        print(e)
         if "No pandoc was found" in str(e):
             raise HTTPException(
                 status_code=status.HTTP_400_BAD_REQUEST,
@@ -491,37 +459,6 @@ def store_doc(
         )
 
 
-class TextRAGForm(BaseModel):
-    name: str
-    content: str
-    collection_name: Optional[str] = None
-
-
-@app.post("/text")
-def store_text(
-    form_data: TextRAGForm,
-    user=Depends(get_current_user),
-):
-
-    collection_name = form_data.collection_name
-    if collection_name == None:
-        collection_name = calculate_sha256_string(form_data.content)
-
-    result = store_text_in_vector_db(
-        form_data.content,
-        metadata={"name": form_data.name, "created_by": user.id},
-        collection_name=collection_name,
-    )
-
-    if result:
-        return {"status": True, "collection_name": collection_name}
-    else:
-        raise HTTPException(
-            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
-            detail=ERROR_MESSAGES.DEFAULT(),
-        )
-
-
 @app.get("/scan")
 def scan_docs_dir(user=Depends(get_admin_user)):
     for path in Path(DOCS_DIR).rglob("./**/*"):
@@ -540,45 +477,41 @@ def scan_docs_dir(user=Depends(get_admin_user)):
                 )
                 data = loader.load()
 
-                try:
-                    result = store_data_in_vector_db(data, collection_name)
+                result = store_data_in_vector_db(data, collection_name)
 
-                    if result:
-                        sanitized_filename = sanitize_filename(filename)
-                        doc = Documents.get_doc_by_name(sanitized_filename)
+                if result:
+                    sanitized_filename = sanitize_filename(filename)
+                    doc = Documents.get_doc_by_name(sanitized_filename)
 
-                        if doc == None:
-                            doc = Documents.insert_new_doc(
-                                user.id,
-                                DocumentForm(
-                                    **{
-                                        "name": sanitized_filename,
-                                        "title": filename,
-                                        "collection_name": collection_name,
-                                        "filename": filename,
-                                        "content": (
-                                            json.dumps(
-                                                {
-                                                    "tags": list(
-                                                        map(
-                                                            lambda name: {"name": name},
-                                                            tags,
-                                                        )
+                    if doc == None:
+                        doc = Documents.insert_new_doc(
+                            user.id,
+                            DocumentForm(
+                                **{
+                                    "name": sanitized_filename,
+                                    "title": filename,
+                                    "collection_name": collection_name,
+                                    "filename": filename,
+                                    "content": (
+                                        json.dumps(
+                                            {
+                                                "tags": list(
+                                                    map(
+                                                        lambda name: {"name": name},
+                                                        tags,
                                                     )
-                                                }
-                                            )
-                                            if len(tags)
-                                            else "{}"
-                                        ),
-                                    }
-                                ),
-                            )
-                except Exception as e:
-                    log.exception(e)
-                    pass
+                                                )
+                                            }
+                                        )
+                                        if len(tags)
+                                        else "{}"
+                                    ),
+                                }
+                            ),
+                        )
 
             except Exception as e:
-                log.exception(e)
+                print(e)
 
     return True
@@ -599,11 +532,11 @@ def reset(user=Depends(get_admin_user)) -> bool:
             elif os.path.isdir(file_path):
                 shutil.rmtree(file_path)
         except Exception as e:
-            log.error("Failed to delete %s. Reason: %s" % (file_path, e))
+            print("Failed to delete %s. Reason: %s" % (file_path, e))
 
     try:
         CHROMA_CLIENT.reset()
     except Exception as e:
-        log.exception(e)
+        print(e)
 
     return True
Reason: %s" % (file_path, e)) try: CHROMA_CLIENT.reset() except Exception as e: - log.exception(e) + print(e) return True diff --git a/backend/apps/rag/utils.py b/backend/apps/rag/utils.py index 7b9e6628..b2da7d90 100644 --- a/backend/apps/rag/utils.py +++ b/backend/apps/rag/utils.py @@ -1,11 +1,7 @@ import re -import logging from typing import List -from config import SRC_LOG_LEVELS, CHROMA_CLIENT - -log = logging.getLogger(__name__) -log.setLevel(SRC_LOG_LEVELS["RAG"]) +from config import CHROMA_CLIENT def query_doc(collection_name: str, query: str, k: int, embedding_function): @@ -95,13 +91,14 @@ def query_collection( def rag_template(template: str, context: str, query: str): - template = template.replace("[context]", context) - template = template.replace("[query]", query) + template = re.sub(r"\[context\]", context, template) + template = re.sub(r"\[query\]", query, template) + return template def rag_messages(docs, messages, template, k, embedding_function): - log.debug(f"docs: {docs}") + print(docs) last_user_message_idx = None for i in range(len(messages) - 1, -1, -1): @@ -141,8 +138,6 @@ def rag_messages(docs, messages, template, k, embedding_function): k=k, embedding_function=embedding_function, ) - elif doc["type"] == "text": - context = doc["content"] else: context = query_doc( collection_name=doc["collection_name"], @@ -151,13 +146,11 @@ def rag_messages(docs, messages, template, k, embedding_function): embedding_function=embedding_function, ) except Exception as e: - log.exception(e) + print(e) context = None relevant_contexts.append(context) - log.debug(f"relevant_contexts: {relevant_contexts}") - context_string = "" for context in relevant_contexts: if context: diff --git a/backend/apps/web/internal/db.py b/backend/apps/web/internal/db.py index 554f8002..d0aa9969 100644 --- a/backend/apps/web/internal/db.py +++ b/backend/apps/web/internal/db.py @@ -1,16 +1,13 @@ from peewee import * -from config import SRC_LOG_LEVELS, DATA_DIR +from config import DATA_DIR import os -import logging -log = logging.getLogger(__name__) -log.setLevel(SRC_LOG_LEVELS["DB"]) # Check if the file exists if os.path.exists(f"{DATA_DIR}/ollama.db"): # Rename the file os.rename(f"{DATA_DIR}/ollama.db", f"{DATA_DIR}/webui.db") - log.info("File renamed successfully.") + print("File renamed successfully.") else: pass diff --git a/backend/apps/web/main.py b/backend/apps/web/main.py index dd5c0c70..bd14f4bd 100644 --- a/backend/apps/web/main.py +++ b/backend/apps/web/main.py @@ -19,7 +19,6 @@ from config import ( DEFAULT_USER_ROLE, ENABLE_SIGNUP, USER_PERMISSIONS, - WEBHOOK_URL, ) app = FastAPI() @@ -33,7 +32,6 @@ app.state.DEFAULT_MODELS = DEFAULT_MODELS app.state.DEFAULT_PROMPT_SUGGESTIONS = DEFAULT_PROMPT_SUGGESTIONS app.state.DEFAULT_USER_ROLE = DEFAULT_USER_ROLE app.state.USER_PERMISSIONS = USER_PERMISSIONS -app.state.WEBHOOK_URL = WEBHOOK_URL app.add_middleware( diff --git a/backend/apps/web/models/auths.py b/backend/apps/web/models/auths.py index 75637700..02d2ab86 100644 --- a/backend/apps/web/models/auths.py +++ b/backend/apps/web/models/auths.py @@ -2,7 +2,6 @@ from pydantic import BaseModel from typing import List, Union, Optional import time import uuid -import logging from peewee import * from apps.web.models.users import UserModel, Users @@ -10,11 +9,6 @@ from utils.utils import verify_password from apps.web.internal.db import DB -from config import SRC_LOG_LEVELS - -log = logging.getLogger(__name__) -log.setLevel(SRC_LOG_LEVELS["MODELS"]) - #################### # DB MODEL #################### 
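One behavioural wrinkle in the reverted rag_template: re.sub interprets backslash escapes in its replacement argument, whereas str.replace (the forward version) inserts retrieved text verbatim. A small sketch of the difference, with illustrative strings:

import re

template = "Use [context] to answer: [query]"
context = "C:\\Users\\demo\\notes.txt"  # backslash-heavy retrieved text

safe = template.replace("[context]", context)  # inserted verbatim
try:
    risky = re.sub(r"\[context\]", context, template)
except re.error:
    risky = None  # "\U" in the replacement parses as a bad escape
print(safe, risky)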
diff --git a/backend/apps/web/internal/db.py b/backend/apps/web/internal/db.py
index 554f8002..d0aa9969 100644
--- a/backend/apps/web/internal/db.py
+++ b/backend/apps/web/internal/db.py
@@ -1,16 +1,13 @@
 from peewee import *
-from config import SRC_LOG_LEVELS, DATA_DIR
+from config import DATA_DIR
 import os
-import logging
 
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["DB"])
 
 # Check if the file exists
 if os.path.exists(f"{DATA_DIR}/ollama.db"):
     # Rename the file
     os.rename(f"{DATA_DIR}/ollama.db", f"{DATA_DIR}/webui.db")
-    log.info("File renamed successfully.")
+    print("File renamed successfully.")
 else:
     pass
diff --git a/backend/apps/web/main.py b/backend/apps/web/main.py
index dd5c0c70..bd14f4bd 100644
--- a/backend/apps/web/main.py
+++ b/backend/apps/web/main.py
@@ -19,7 +19,6 @@ from config import (
     DEFAULT_USER_ROLE,
     ENABLE_SIGNUP,
     USER_PERMISSIONS,
-    WEBHOOK_URL,
 )
 
 app = FastAPI()
@@ -33,7 +32,6 @@ app.state.DEFAULT_MODELS = DEFAULT_MODELS
 app.state.DEFAULT_PROMPT_SUGGESTIONS = DEFAULT_PROMPT_SUGGESTIONS
 app.state.DEFAULT_USER_ROLE = DEFAULT_USER_ROLE
 app.state.USER_PERMISSIONS = USER_PERMISSIONS
-app.state.WEBHOOK_URL = WEBHOOK_URL
 
 app.add_middleware(
diff --git a/backend/apps/web/models/auths.py b/backend/apps/web/models/auths.py
index 75637700..02d2ab86 100644
--- a/backend/apps/web/models/auths.py
+++ b/backend/apps/web/models/auths.py
@@ -2,7 +2,6 @@ from pydantic import BaseModel
 from typing import List, Union, Optional
 import time
 import uuid
-import logging
 from peewee import *
 
 from apps.web.models.users import UserModel, Users
@@ -10,11 +9,6 @@ from utils.utils import verify_password
 
 from apps.web.internal.db import DB
 
-from config import SRC_LOG_LEVELS
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["MODELS"])
-
 ####################
 # DB MODEL
 ####################
@@ -92,7 +86,7 @@ class AuthsTable:
     def insert_new_auth(
         self, email: str, password: str, name: str, role: str = "pending"
     ) -> Optional[UserModel]:
-        log.info("insert_new_auth")
+        print("insert_new_auth")
 
         id = str(uuid.uuid4())
 
@@ -109,7 +103,7 @@ class AuthsTable:
             return None
 
     def authenticate_user(self, email: str, password: str) -> Optional[UserModel]:
-        log.info(f"authenticate_user: {email}")
+        print("authenticate_user", email)
         try:
             auth = Auth.get(Auth.email == email, Auth.active == True)
             if auth:
diff --git a/backend/apps/web/models/chats.py b/backend/apps/web/models/chats.py
index c9d13004..ca55c71d 100644
--- a/backend/apps/web/models/chats.py
+++ b/backend/apps/web/models/chats.py
@@ -95,6 +95,20 @@ class ChatTable:
         except:
             return None
 
+    def update_chat_by_id(self, id: str, chat: dict) -> Optional[ChatModel]:
+        try:
+            query = Chat.update(
+                chat=json.dumps(chat),
+                title=chat["title"] if "title" in chat else "New Chat",
+                timestamp=int(time.time()),
+            ).where(Chat.id == id)
+            query.execute()
+
+            chat = Chat.get(Chat.id == id)
+            return ChatModel(**model_to_dict(chat))
+        except:
+            return None
+
     def get_chat_lists_by_user_id(
         self, user_id: str, skip: int = 0, limit: int = 50
     ) -> List[ChatModel]:
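The chats.py hunk above restores update_chat_by_id, which re-serialises the whole chat dict and silently falls back to the title "New Chat" when no "title" key is present. A hypothetical usage sketch; the id is made up, and Chats is assumed to be the ChatTable singleton this module exports:

from apps.web.models.chats import Chats

updated = Chats.update_chat_by_id(
    "3f1c0d42-0000-0000-0000-000000000000",   # illustrative id only
    {"title": "Renamed chat", "messages": []},
)
if updated is None:
    print("update failed: unknown id or DB error")
else:
    print(updated.title, updated.timestamp)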
diff --git a/backend/apps/web/models/documents.py b/backend/apps/web/models/documents.py
index 91e721a4..6a372b2c 100644
--- a/backend/apps/web/models/documents.py
+++ b/backend/apps/web/models/documents.py
@@ -3,7 +3,6 @@ from peewee import *
 from playhouse.shortcuts import model_to_dict
 from typing import List, Union, Optional
 import time
-import logging
 
 from utils.utils import decode_token
 from utils.misc import get_gravatar_url
@@ -12,11 +11,6 @@ from apps.web.internal.db import DB
 
 import json
 
-from config import SRC_LOG_LEVELS
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["MODELS"])
-
 ####################
 # Documents DB Schema
 ####################
@@ -124,7 +118,7 @@ class DocumentsTable:
             doc = Document.get(Document.name == form_data.name)
             return DocumentModel(**model_to_dict(doc))
         except Exception as e:
-            log.exception(e)
+            print(e)
             return None
 
     def update_doc_content_by_name(
@@ -144,7 +138,7 @@ class DocumentsTable:
             doc = Document.get(Document.name == name)
             return DocumentModel(**model_to_dict(doc))
         except Exception as e:
-            log.exception(e)
+            print(e)
             return None
 
     def delete_doc_by_name(self, name: str) -> bool:
diff --git a/backend/apps/web/models/modelfiles.py b/backend/apps/web/models/modelfiles.py
index 50439a80..51eccfb5 100644
--- a/backend/apps/web/models/modelfiles.py
+++ b/backend/apps/web/models/modelfiles.py
@@ -64,8 +64,8 @@ class ModelfilesTable:
         self.db.create_tables([Modelfile])
 
     def insert_new_modelfile(
-        self, user_id: str, form_data: ModelfileForm
-    ) -> Optional[ModelfileModel]:
+            self, user_id: str,
+            form_data: ModelfileForm) -> Optional[ModelfileModel]:
         if "tagName" in form_data.modelfile:
             modelfile = ModelfileModel(
                 **{
@@ -73,8 +73,7 @@ class ModelfilesTable:
                     "tag_name": form_data.modelfile["tagName"],
                     "modelfile": json.dumps(form_data.modelfile),
                     "timestamp": int(time.time()),
-                }
-            )
+                })
 
             try:
                 result = Modelfile.create(**modelfile.model_dump())
@@ -88,28 +87,29 @@ class ModelfilesTable:
         else:
             return None
 
-    def get_modelfile_by_tag_name(self, tag_name: str) -> Optional[ModelfileModel]:
+    def get_modelfile_by_tag_name(self,
+                                  tag_name: str) -> Optional[ModelfileModel]:
         try:
             modelfile = Modelfile.get(Modelfile.tag_name == tag_name)
             return ModelfileModel(**model_to_dict(modelfile))
         except:
             return None
 
-    def get_modelfiles(self, skip: int = 0, limit: int = 50) -> List[ModelfileResponse]:
+    def get_modelfiles(self,
+                       skip: int = 0,
+                       limit: int = 50) -> List[ModelfileResponse]:
         return [
             ModelfileResponse(
                 **{
                     **model_to_dict(modelfile),
-                    "modelfile": json.loads(modelfile.modelfile),
-                }
-            )
-            for modelfile in Modelfile.select()
+                    "modelfile":
+                    json.loads(modelfile.modelfile),
+                }) for modelfile in Modelfile.select()
             # .limit(limit).offset(skip)
         ]
 
     def update_modelfile_by_tag_name(
-        self, tag_name: str, modelfile: dict
-    ) -> Optional[ModelfileModel]:
+            self, tag_name: str, modelfile: dict) -> Optional[ModelfileModel]:
         try:
             query = Modelfile.update(
                 modelfile=json.dumps(modelfile),
diff --git a/backend/apps/web/models/prompts.py b/backend/apps/web/models/prompts.py
index e6b663c0..044a3697 100644
--- a/backend/apps/web/models/prompts.py
+++ b/backend/apps/web/models/prompts.py
@@ -52,9 +52,8 @@ class PromptsTable:
         self.db = db
         self.db.create_tables([Prompt])
 
-    def insert_new_prompt(
-        self, user_id: str, form_data: PromptForm
-    ) -> Optional[PromptModel]:
+    def insert_new_prompt(self, user_id: str,
+                          form_data: PromptForm) -> Optional[PromptModel]:
         prompt = PromptModel(
             **{
                 "user_id": user_id,
@@ -62,8 +61,7 @@ class PromptsTable:
                 "title": form_data.title,
                 "content": form_data.content,
                 "timestamp": int(time.time()),
-            }
-        )
+            })
 
         try:
             result = Prompt.create(**prompt.model_dump())
@@ -83,14 +81,13 @@ class PromptsTable:
 
     def get_prompts(self) -> List[PromptModel]:
         return [
-            PromptModel(**model_to_dict(prompt))
-            for prompt in Prompt.select()
+            PromptModel(**model_to_dict(prompt)) for prompt in Prompt.select()
             # .limit(limit).offset(skip)
         ]
 
     def update_prompt_by_command(
-        self, command: str, form_data: PromptForm
-    ) -> Optional[PromptModel]:
+            self, command: str,
+            form_data: PromptForm) -> Optional[PromptModel]:
         try:
             query = Prompt.update(
                 title=form_data.title,
diff --git a/backend/apps/web/models/tags.py b/backend/apps/web/models/tags.py
index 196551b7..d4264501 100644
--- a/backend/apps/web/models/tags.py
+++ b/backend/apps/web/models/tags.py
@@ -6,15 +6,9 @@ from playhouse.shortcuts import model_to_dict
 import json
 import uuid
 import time
-import logging
 
 from apps.web.internal.db import DB
 
-from config import SRC_LOG_LEVELS
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["MODELS"])
-
 ####################
 # Tag DB Schema
 ####################
@@ -179,7 +173,7 @@ class TagTable:
                 (ChatIdTag.tag_name == tag_name) & (ChatIdTag.user_id == user_id)
             )
             res = query.execute()  # Remove the rows, return number of rows removed.
-            log.debug(f"res: {res}")
+            print(res)
 
             tag_count = self.count_chat_ids_by_tag_name_and_user_id(tag_name, user_id)
             if tag_count == 0:
@@ -191,7 +185,7 @@ class TagTable:
 
             return True
         except Exception as e:
-            log.error(f"delete_tag: {e}")
+            print("delete_tag", e)
             return False
 
     def delete_tag_by_tag_name_and_chat_id_and_user_id(
@@ -204,7 +198,7 @@ class TagTable:
                 & (ChatIdTag.user_id == user_id)
             )
             res = query.execute()  # Remove the rows, return number of rows removed.
-            log.debug(f"res: {res}")
+            print(res)
 
             tag_count = self.count_chat_ids_by_tag_name_and_user_id(tag_name, user_id)
             if tag_count == 0:
@@ -216,7 +210,7 @@ class TagTable:
 
             return True
         except Exception as e:
-            log.error(f"delete_tag: {e}")
+            print("delete_tag", e)
             return False
 
     def delete_tags_by_chat_id_and_user_id(self, chat_id: str, user_id: str) -> bool:
diff --git a/backend/apps/web/routers/auths.py b/backend/apps/web/routers/auths.py
index d881ec74..3db2d0ad 100644
--- a/backend/apps/web/routers/auths.py
+++ b/backend/apps/web/routers/auths.py
@@ -27,8 +27,7 @@ from utils.utils import (
     create_token,
 )
 from utils.misc import parse_duration, validate_email_format
-from utils.webhook import post_webhook
-from constants import ERROR_MESSAGES, WEBHOOK_MESSAGES
+from constants import ERROR_MESSAGES
 
 router = APIRouter()
 
@@ -156,17 +155,6 @@ async def signup(request: Request, form_data: SignupForm):
             )
             # response.set_cookie(key='token', value=token, httponly=True)
 
-            if request.app.state.WEBHOOK_URL:
-                post_webhook(
-                    request.app.state.WEBHOOK_URL,
-                    WEBHOOK_MESSAGES.USER_SIGNUP(user.name),
-                    {
-                        "action": "signup",
-                        "message": WEBHOOK_MESSAGES.USER_SIGNUP(user.name),
-                        "user": user.model_dump_json(exclude_none=True),
-                    },
-                )
-
             return {
                 "token": token,
                 "token_type": "Bearer",
diff --git a/backend/apps/web/routers/chats.py b/backend/apps/web/routers/chats.py
index 5f8c61b7..0c0ac1ce 100644
--- a/backend/apps/web/routers/chats.py
+++ b/backend/apps/web/routers/chats.py
@@ -5,7 +5,6 @@ from utils.utils import get_current_user, get_admin_user
 from fastapi import APIRouter
 from pydantic import BaseModel
 import json
-import logging
 
 from apps.web.models.users import Users
 from apps.web.models.chats import (
@@ -28,11 +27,6 @@ from apps.web.models.tags import (
 
 from constants import ERROR_MESSAGES
 
-from config import SRC_LOG_LEVELS
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["MODELS"])
-
 router = APIRouter()
 
 ############################
@@ -84,7 +78,7 @@ async def create_new_chat(form_data: ChatForm, user=Depends(get_current_user)):
         chat = Chats.insert_new_chat(user.id, form_data)
         return ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
     except Exception as e:
-        log.exception(e)
+        print(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
         )
@@ -101,7 +95,7 @@ async def get_all_tags(user=Depends(get_current_user)):
         tags = Tags.get_tags_by_user_id(user.id)
         return tags
     except Exception as e:
-        log.exception(e)
+        print(e)
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.DEFAULT()
         )
diff --git a/backend/apps/web/routers/configs.py b/backend/apps/web/routers/configs.py
index 0bad55a6..b293a398 100644
--- a/backend/apps/web/routers/configs.py
+++ b/backend/apps/web/routers/configs.py
@@ -10,12 +10,7 @@ import uuid
 
 from apps.web.models.users import Users
 
-from utils.utils import (
-    get_password_hash,
-    get_current_user,
-    get_admin_user,
-    create_token,
-)
+from utils.utils import get_password_hash, get_current_user, get_admin_user, create_token
 from utils.misc import get_gravatar_url, validate_email_format
 
 from constants import ERROR_MESSAGES
@@ -48,6 +43,7 @@ async def set_global_default_models(
     return request.app.state.DEFAULT_MODELS
 
 
+
 @router.post("/default/suggestions", response_model=List[PromptSuggestion])
 async def set_global_default_suggestions(
     request: Request,
diff --git a/backend/apps/web/routers/modelfiles.py b/backend/apps/web/routers/modelfiles.py
index 3cdbf8a7..0c5c1216 100644
--- a/backend/apps/web/routers/modelfiles.py
+++ b/backend/apps/web/routers/modelfiles.py
@@ -24,9 +24,9 @@ router = APIRouter()
 
 @router.get("/", response_model=List[ModelfileResponse])
-async def get_modelfiles(
-    skip: int = 0, limit: int = 50, user=Depends(get_current_user)
-):
+async def get_modelfiles(skip: int = 0,
+                         limit: int = 50,
+                         user=Depends(get_current_user)):
     return Modelfiles.get_modelfiles(skip, limit)
 
@@ -36,16 +36,17 @@ async def get_modelfiles(
 
 @router.post("/create", response_model=Optional[ModelfileResponse])
-async def create_new_modelfile(form_data: ModelfileForm, user=Depends(get_admin_user)):
+async def create_new_modelfile(form_data: ModelfileForm,
+                               user=Depends(get_admin_user)):
     modelfile = Modelfiles.insert_new_modelfile(user.id, form_data)
 
     if modelfile:
         return ModelfileResponse(
             **{
                 **modelfile.model_dump(),
-                "modelfile": json.loads(modelfile.modelfile),
-            }
-        )
+                "modelfile":
+                json.loads(modelfile.modelfile),
+            })
     else:
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
@@ -59,18 +60,17 @@ async def create_new_modelfile(form_data: ModelfileForm, user=Depends(get_admin_
 
 @router.post("/", response_model=Optional[ModelfileResponse])
-async def get_modelfile_by_tag_name(
-    form_data: ModelfileTagNameForm, user=Depends(get_current_user)
-):
+async def get_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
+                                    user=Depends(get_current_user)):
     modelfile = Modelfiles.get_modelfile_by_tag_name(form_data.tag_name)
 
     if modelfile:
         return ModelfileResponse(
             **{
                 **modelfile.model_dump(),
-                "modelfile": json.loads(modelfile.modelfile),
-            }
-        )
+                "modelfile":
+                json.loads(modelfile.modelfile),
+            })
     else:
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
@@ -84,9 +84,8 @@ async def get_modelfile_by_tag_name(
 
 @router.post("/update", response_model=Optional[ModelfileResponse])
-async def update_modelfile_by_tag_name(
-    form_data: ModelfileUpdateForm, user=Depends(get_admin_user)
-):
+async def update_modelfile_by_tag_name(form_data: ModelfileUpdateForm,
+                                       user=Depends(get_admin_user)):
     modelfile = Modelfiles.get_modelfile_by_tag_name(form_data.tag_name)
     if modelfile:
         updated_modelfile = {
@@ -95,15 +94,14 @@ async def update_modelfile_by_tag_name(
         }
 
         modelfile = Modelfiles.update_modelfile_by_tag_name(
-            form_data.tag_name, updated_modelfile
-        )
+            form_data.tag_name, updated_modelfile)
 
         return ModelfileResponse(
             **{
                 **modelfile.model_dump(),
-                "modelfile": json.loads(modelfile.modelfile),
-            }
-        )
+                "modelfile":
+                json.loads(modelfile.modelfile),
+            })
     else:
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
@@ -117,8 +115,7 @@ async def update_modelfile_by_tag_name(
 
 @router.delete("/delete", response_model=bool)
-async def delete_modelfile_by_tag_name(
-    form_data: ModelfileTagNameForm, user=Depends(get_admin_user)
-):
+async def delete_modelfile_by_tag_name(form_data: ModelfileTagNameForm,
+                                       user=Depends(get_admin_user)):
     result = Modelfiles.delete_modelfile_by_tag_name(form_data.tag_name)
     return result
diff --git a/backend/apps/web/routers/users.py b/backend/apps/web/routers/users.py
index 6f1020ec..b8e2732c 100644
--- a/backend/apps/web/routers/users.py
+++ b/backend/apps/web/routers/users.py
@@ -7,7 +7,6 @@ from fastapi import APIRouter
 from pydantic import BaseModel
 import time
 import uuid
-import logging
 
 from apps.web.models.users import UserModel, UserUpdateForm, UserRoleUpdateForm, Users
 from apps.web.models.auths import Auths
@@ -15,11 +14,6 @@ from apps.web.models.auths import Auths
 from utils.utils import get_current_user, get_password_hash, get_admin_user
 from constants import ERROR_MESSAGES
 
-from config import SRC_LOG_LEVELS
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["MODELS"])
-
 router = APIRouter()
 
 ############################
@@ -89,7 +83,7 @@ async def update_user_by_id(
 
         if form_data.password:
             hashed = get_password_hash(form_data.password)
-            log.debug(f"hashed: {hashed}")
+            print(hashed)
             Auths.update_user_password_by_id(user_id, hashed)
 
         Auths.update_email_by_id(user_id, form_data.email.lower())
diff --git a/backend/apps/web/routers/utils.py b/backend/apps/web/routers/utils.py
index 4b5ac8cf..fbb350cf 100644
--- a/backend/apps/web/routers/utils.py
+++ b/backend/apps/web/routers/utils.py
@@ -21,6 +21,155 @@ from constants import ERROR_MESSAGES
 
 router = APIRouter()
 
+class UploadBlobForm(BaseModel):
+    filename: str
+
+
+from urllib.parse import urlparse
+
+
+def parse_huggingface_url(hf_url):
+    try:
+        # Parse the URL
+        parsed_url = urlparse(hf_url)
+
+        # Get the path and split it into components
+        path_components = parsed_url.path.split("/")
+
+        # Extract the desired output
+        user_repo = "/".join(path_components[1:3])
+        model_file = path_components[-1]
+
+        return model_file
+    except ValueError:
+        return None
+
+
+async def download_file_stream(url, file_path, file_name, chunk_size=1024 * 1024):
+    done = False
+
+    if os.path.exists(file_path):
+        current_size = os.path.getsize(file_path)
+    else:
+        current_size = 0
+
+    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}
+
+    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout
+
+    async with aiohttp.ClientSession(timeout=timeout) as session:
+        async with session.get(url, headers=headers) as response:
+            total_size = int(response.headers.get("content-length", 0)) + current_size
+
+            with open(file_path, "ab+") as file:
+                async for data in response.content.iter_chunked(chunk_size):
+                    current_size += len(data)
+                    file.write(data)
+
+                    done = current_size == total_size
+                    progress = round((current_size / total_size) * 100, 2)
+                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'
+
+                if done:
+                    file.seek(0)
+                    hashed = calculate_sha256(file)
+                    file.seek(0)
+
+                    url = f"{OLLAMA_BASE_URLS[0]}/blobs/sha256:{hashed}"
+                    response = requests.post(url, data=file)
+
+                    if response.ok:
+                        res = {
+                            "done": done,
+                            "blob": f"sha256:{hashed}",
+                            "name": file_name,
+                        }
+                        os.remove(file_path)
+
+                        yield f"data: {json.dumps(res)}\n\n"
+                    else:
+                        raise "Ollama: Could not create blob, Please try again."
+
+
+@router.get("/download")
+async def download(
+    url: str,
+):
+    # url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
+    file_name = parse_huggingface_url(url)
+
+    if file_name:
+        file_path = f"{UPLOAD_DIR}/{file_name}"
+
+        return StreamingResponse(
+            download_file_stream(url, file_path, file_name),
+            media_type="text/event-stream",
+        )
+    else:
+        return None
+
+
+@router.post("/upload")
+def upload(file: UploadFile = File(...)):
+    file_path = f"{UPLOAD_DIR}/{file.filename}"
+
+    # Save file in chunks
+    with open(file_path, "wb+") as f:
+        for chunk in file.file:
+            f.write(chunk)
+
+    def file_process_stream():
+        total_size = os.path.getsize(file_path)
+        chunk_size = 1024 * 1024
+        try:
+            with open(file_path, "rb") as f:
+                total = 0
+                done = False
+
+                while not done:
+                    chunk = f.read(chunk_size)
+                    if not chunk:
+                        done = True
+                        continue
+
+                    total += len(chunk)
+                    progress = round((total / total_size) * 100, 2)
+
+                    res = {
+                        "progress": progress,
+                        "total": total_size,
+                        "completed": total,
+                    }
+                    yield f"data: {json.dumps(res)}\n\n"
+
+                if done:
+                    f.seek(0)
+                    hashed = calculate_sha256(f)
+                    f.seek(0)
+
+                    url = f"{OLLAMA_BASE_URLS[0]}/blobs/sha256:{hashed}"
+                    response = requests.post(url, data=f)
+
+                    if response.ok:
+                        res = {
+                            "done": done,
+                            "blob": f"sha256:{hashed}",
+                            "name": file.filename,
+                        }
+                        os.remove(file_path)
+                        yield f"data: {json.dumps(res)}\n\n"
+                    else:
+                        raise Exception(
+                            "Ollama: Could not create blob, Please try again."
+                        )
+
+        except Exception as e:
+            res = {"error": str(e)}
+            yield f"data: {json.dumps(res)}\n\n"
+
+    return StreamingResponse(file_process_stream(), media_type="text/event-stream")
+
+
 @router.get("/gravatar")
 async def get_gravatar(
     email: str,
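Both restored endpoints stream progress as server-sent events, one "data: {...}" JSON object per chunk, ending with a blob/name record once the SHA-256 blob has been pushed to Ollama. A client-side sketch of consuming that stream; the base URL and route prefix are assumptions, not taken from the patch, and the model URL reuses the example commented in the code above:

import json
import requests

url = "http://localhost:8080/api/v1/utils/download"  # assumed deployment URL
params = {
    "url": "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
}

with requests.get(url, params=params, stream=True) as r:
    for line in r.iter_lines():
        if line.startswith(b"data: "):
            event = json.loads(line[len(b"data: "):])
            if "blob" in event:
                print("done:", event["name"], event["blob"])
            elif "progress" in event:
                print(f"{event['progress']}% of {event['total']} bytes")

One caveat worth knowing before reuse: the download path's bare `raise "Ollama: ..."` raises a TypeError in Python 3 (strings are not exceptions), whereas the upload path correctly wraps the message in `Exception(...)` and reports it through the stream's error event.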
diff --git a/backend/config.py b/backend/config.py
index 0be86de8..3bc00323 100644
--- a/backend/config.py
+++ b/backend/config.py
@@ -1,6 +1,4 @@
 import os
-import sys
-import logging
 import chromadb
 from chromadb import Settings
 from base64 import b64encode
@@ -23,10 +21,9 @@ try:
     load_dotenv(find_dotenv("../.env"))
 except ImportError:
-    log.warning("dotenv not installed, skipping...")
+    print("dotenv not installed, skipping...")
 
 WEBUI_NAME = "Open WebUI"
-WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
 shutil.copyfile("../build/favicon.png", "./static/favicon.png")
 
 ####################################
@@ -103,47 +100,6 @@ for version in soup.find_all("h2"):
 
 CHANGELOG = changelog_json
 
-####################################
-# LOGGING
-####################################
-log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]
-
-GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper()
-if GLOBAL_LOG_LEVEL in log_levels:
-    logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True)
-else:
-    GLOBAL_LOG_LEVEL = "INFO"
-
-log = logging.getLogger(__name__)
-log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
-
-log_sources = [
-    "AUDIO",
-    "COMFYUI",
-    "CONFIG",
-    "DB",
-    "IMAGES",
-    "LITELLM",
-    "MAIN",
-    "MODELS",
-    "OLLAMA",
-    "OPENAI",
-    "RAG",
-    "WEBHOOK",
-]
-
-SRC_LOG_LEVELS = {}
-
-for source in log_sources:
-    log_env_var = source + "_LOG_LEVEL"
-    SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper()
-    if SRC_LOG_LEVELS[source] not in log_levels:
-        SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL
-    log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}")
-
-log.setLevel(SRC_LOG_LEVELS["CONFIG"])
-
-
 ####################################
 # CUSTOM_NAME
 ####################################
@@ -155,7 +111,7 @@ if CUSTOM_NAME:
         data = r.json()
         if r.ok:
             if "logo" in data:
-                WEBUI_FAVICON_URL = url = (
+                url = (
                     f"https://api.openwebui.com{data['logo']}"
                     if data["logo"][0] == "/"
                     else data["logo"]
@@ -169,7 +125,7 @@ if CUSTOM_NAME:
 
             WEBUI_NAME = data["name"]
     except Exception as e:
-        log.exception(e)
+        print(e)
         pass
 
@@ -238,9 +194,9 @@ def create_config_file(file_path):
 LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"
 
 if not os.path.exists(LITELLM_CONFIG_PATH):
-    log.info("Config file doesn't exist. Creating...")
+    print("Config file doesn't exist. Creating...")
     create_config_file(LITELLM_CONFIG_PATH)
-    log.info("Config file created successfully.")
+    print("Config file created successfully.")
 
 ####################################
@@ -253,7 +209,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
 
 OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
 INCLUDE_OLLAMA = os.environ.get("INCLUDE_OLLAMA_ENV", "false")
-K8S_FLAG = os.environ.get("K8S_FLAG", "")
+
 
 if OLLAMA_BASE_URL == "" and OLLAMA_API_BASE_URL != "":
     OLLAMA_BASE_URL = (
@@ -271,9 +227,6 @@ if ENV == "prod":
     else:
         OLLAMA_BASE_URL = "http://host.docker.internal:11434"
 
-    elif K8S_FLAG:
-        OLLAMA_BASE_URL = "http://ollama-service.open-webui.svc.cluster.local:11434"
-
 OLLAMA_BASE_URLS = os.environ.get("OLLAMA_BASE_URLS", "")
 OLLAMA_BASE_URLS = OLLAMA_BASE_URLS if OLLAMA_BASE_URLS != "" else OLLAMA_BASE_URL
 
@@ -303,10 +256,8 @@ OPENAI_API_BASE_URLS = (
     OPENAI_API_BASE_URLS if OPENAI_API_BASE_URLS != "" else OPENAI_API_BASE_URL
 )
 
-OPENAI_API_BASE_URLS = [
-    url.strip() if url != "" else "https://api.openai.com/v1"
-    for url in OPENAI_API_BASE_URLS.split(";")
-]
+OPENAI_API_BASE_URLS = [url.strip() for url in OPENAI_API_BASE_URLS.split(";")]
+
 
 ####################################
 # WEBUI
@@ -343,19 +294,13 @@ DEFAULT_PROMPT_SUGGESTIONS = (
 
 DEFAULT_USER_ROLE = os.getenv("DEFAULT_USER_ROLE", "pending")
 
-
-USER_PERMISSIONS_CHAT_DELETION = (
-    os.environ.get("USER_PERMISSIONS_CHAT_DELETION", "True").lower() == "true"
-)
-
-USER_PERMISSIONS = {"chat": {"deletion": USER_PERMISSIONS_CHAT_DELETION}}
+USER_PERMISSIONS = {"chat": {"deletion": True}}
 
-MODEL_FILTER_ENABLED = os.environ.get("MODEL_FILTER_ENABLED", "False").lower() == "true"
+MODEL_FILTER_ENABLED = os.environ.get("MODEL_FILTER_ENABLED", False)
 MODEL_FILTER_LIST = os.environ.get("MODEL_FILTER_LIST", "")
 MODEL_FILTER_LIST = [model.strip() for model in MODEL_FILTER_LIST.split(";")]
 
-WEBHOOK_URL = os.environ.get("WEBHOOK_URL", "")
 
 ####################################
 # WEBUI_VERSION
@@ -440,4 +385,3 @@ WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models"
 ####################################
 
 AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "")
-COMFYUI_BASE_URL = os.getenv("COMFYUI_BASE_URL", "")
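One behavioural detail of the reverted config: os.environ.get("MODEL_FILTER_ENABLED", False) returns the raw string whenever the variable is set, and any non-empty string, including "false", is truthy. A sketch of the pitfall:

import os

os.environ["MODEL_FILTER_ENABLED"] = "false"

raw = os.environ.get("MODEL_FILTER_ENABLED", False)   # reverted form -> "false"
print(bool(raw))                                      # True: non-empty string

parsed = os.environ.get("MODEL_FILTER_ENABLED", "False").lower() == "true"
print(parsed)                                         # False: the forward form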
diff --git a/backend/constants.py b/backend/constants.py
index 8bcdd078..eacf8a20 100644
--- a/backend/constants.py
+++ b/backend/constants.py
@@ -5,13 +5,6 @@ class MESSAGES(str, Enum):
     DEFAULT = lambda msg="": f"{msg if msg else ''}"
 
 
-class WEBHOOK_MESSAGES(str, Enum):
-    DEFAULT = lambda msg="": f"{msg if msg else ''}"
-    USER_SIGNUP = lambda username="": (
-        f"New user signed up: {username}" if username else "New user signed up"
-    )
-
-
 class ERROR_MESSAGES(str, Enum):
     def __str__(self) -> str:
         return super().__str__()
@@ -53,12 +46,9 @@ class ERROR_MESSAGES(str, Enum):
     PANDOC_NOT_INSTALLED = "Pandoc is not installed on the server. Please contact your administrator for assistance."
     INCORRECT_FORMAT = (
-        lambda err="": f"Invalid format. Please use the correct format{err}"
+        lambda err="": f"Invalid format. Please use the correct format{err if err else ''}"
     )
     RATE_LIMIT_EXCEEDED = "API rate limit exceeded"
 
     MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"
     OPENAI_NOT_FOUND = lambda name="": f"OpenAI API was not found"
-    OLLAMA_NOT_FOUND = "WebUI could not connect to Ollama"
-
-    EMPTY_CONTENT = "The content provided is empty. Please ensure that there is text or data present before proceeding."
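The constants module keeps its messages callable by assigning lambdas inside an Enum body; functions are not converted to enum members, so they can be invoked directly on the class. A self-contained sketch using two of the messages from the hunk above:

from enum import Enum

class ERROR_MESSAGES(str, Enum):
    INCORRECT_FORMAT = (
        lambda err="": f"Invalid format. Please use the correct format{err if err else ''}"
    )
    MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"

print(ERROR_MESSAGES.MODEL_NOT_FOUND("llama2:latest"))
print(ERROR_MESSAGES.INCORRECT_FORMAT(" (JSON)"))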
diff --git a/backend/data/config.json b/backend/data/config.json
index 604ffb03..d3ada59c 100644
--- a/backend/data/config.json
+++ b/backend/data/config.json
@@ -1,24 +1,35 @@
 {
-  "version": 0,
-  "ui": {
-    "default_locale": "en-US",
-    "prompt_suggestions": [
-      {
-        "title": ["Help me study", "vocabulary for a college entrance exam"],
-        "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."
-      },
-      {
-        "title": ["Give me ideas", "for what to do with my kids' art"],
-        "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."
-      },
-      {
-        "title": ["Tell me a fun fact", "about the Roman Empire"],
-        "content": "Tell me a random fun fact about the Roman Empire"
-      },
-      {
-        "title": ["Show me a code snippet", "of a website's sticky header"],
-        "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript."
-      }
-    ]
-  }
-}
+    "version": "0.0.1",
+    "ui": {
+        "prompt_suggestions": [
+            {
+                "title": [
+                    "Help me study",
+                    "vocabulary for a college entrance exam"
+                ],
+                "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."
+            },
+            {
+                "title": [
+                    "Give me ideas",
+                    "for what to do with my kids' art"
+                ],
+                "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."
+            },
+            {
+                "title": [
+                    "Tell me a fun fact",
+                    "about the Roman Empire"
+                ],
+                "content": "Tell me a random fun fact about the Roman Empire"
+            },
+            {
+                "title": [
+                    "Show me a code snippet",
+                    "of a website's sticky header"
+                ],
+                "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript."
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/backend/main.py b/backend/main.py
index 8cc704a2..25322718 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -4,7 +4,6 @@ import markdown
 import time
 import os
 import sys
-import logging
 import requests
 
 from fastapi import FastAPI, Request, Depends, status
@@ -32,7 +31,6 @@ from utils.utils import get_admin_user
 from apps.rag.utils import rag_messages
 
 from config import (
-    CONFIG_DATA,
     WEBUI_NAME,
     ENV,
     VERSION,
@@ -40,16 +38,9 @@ from config import (
     FRONTEND_BUILD_DIR,
     MODEL_FILTER_ENABLED,
     MODEL_FILTER_LIST,
-    GLOBAL_LOG_LEVEL,
-    SRC_LOG_LEVELS,
-    WEBHOOK_URL,
 )
 from constants import ERROR_MESSAGES
 
-logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["MAIN"])
-
 
 class SPAStaticFiles(StaticFiles):
     async def get_response(self, path: str, scope):
@@ -67,9 +58,6 @@ app = FastAPI(docs_url="/docs" if ENV == "dev" else None, redoc_url=None)
 app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
 app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST
 
-app.state.WEBHOOK_URL = WEBHOOK_URL
-
-
 origins = ["*"]
 
 
@@ -78,7 +66,7 @@ class RAGMiddleware(BaseHTTPMiddleware):
         if request.method == "POST" and (
             "/api/chat" in request.url.path or "/chat/completions" in request.url.path
         ):
-            log.debug(f"request.url.path: {request.url.path}")
+            print(request.url.path)
 
             # Read the original request body
             body = await request.body()
@@ -90,6 +78,7 @@ class RAGMiddleware(BaseHTTPMiddleware):
             # Example: Add a new key-value pair or modify existing ones
             # data["modified"] = True  # Example modification
             if "docs" in data:
+                data = {**data}
                 data["messages"] = rag_messages(
                     data["docs"],
@@ -100,7 +89,7 @@ class RAGMiddleware(BaseHTTPMiddleware):
                 )
                 del data["docs"]
 
-                log.debug(f"data['messages']: {data['messages']}")
+                print(data["messages"])
 
             modified_body_bytes = json.dumps(data).encode("utf-8")
 
@@ -164,18 +153,11 @@ app.mount("/rag/api/v1", rag_app)
 
 @app.get("/api/config")
 async def get_app_config():
-    # Checking and Handling the Absence of 'ui' in CONFIG_DATA
-    default_locale = "en-US"
-    if "ui" in CONFIG_DATA:
-        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
-
-    # The Rest of the Function Now Uses the Variables Defined Above
     return {
         "status": True,
         "name": WEBUI_NAME,
         "version": VERSION,
-        "default_locale": default_locale,
         "images": images_app.state.ENABLED,
         "default_models": webui_app.state.DEFAULT_MODELS,
         "default_prompt_suggestions": webui_app.state.DEFAULT_PROMPT_SUGGESTIONS,
@@ -196,9 +178,10 @@ class ModelFilterConfigForm(BaseModel):
 
 @app.post("/api/config/model/filter")
-async def update_model_filter_config(
+async def get_model_filter_config(
     form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
 ):
+
     app.state.MODEL_FILTER_ENABLED = form_data.enabled
     app.state.MODEL_FILTER_LIST = form_data.models
 
@@ -208,39 +191,15 @@ async def update_model_filter_config(
     openai_app.state.MODEL_FILTER_ENABLED = app.state.MODEL_FILTER_ENABLED
     openai_app.state.MODEL_FILTER_LIST = app.state.MODEL_FILTER_LIST
 
-    litellm_app.state.MODEL_FILTER_ENABLED = app.state.MODEL_FILTER_ENABLED
-    litellm_app.state.MODEL_FILTER_LIST = app.state.MODEL_FILTER_LIST
-
     return {
         "enabled": app.state.MODEL_FILTER_ENABLED,
         "models": app.state.MODEL_FILTER_LIST,
     }
 
-@app.get("/api/webhook")
-async def get_webhook_url(user=Depends(get_admin_user)):
-    return {
-        "url": app.state.WEBHOOK_URL,
-    }
-
-
-class UrlForm(BaseModel):
-    url: str
-
-
-@app.post("/api/webhook")
-async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
-    app.state.WEBHOOK_URL = form_data.url
-
-    webui_app.state.WEBHOOK_URL = app.state.WEBHOOK_URL
-
-    return {
-        "url": app.state.WEBHOOK_URL,
-    }
-
-
 @app.get("/api/version")
 async def get_app_config():
+
     return {
         "version": VERSION,
     }
@@ -248,7 +207,7 @@ async def get_app_config():
 
 @app.get("/api/changelog")
 async def get_app_changelog():
-    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
+    return CHANGELOG
 
 
 @app.get("/api/version/updates")
diff --git a/backend/requirements.txt b/backend/requirements.txt
index df8fcfec..29fb3492 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -45,4 +45,3 @@ PyJWT
 pyjwt[crypto]
 
 black
-langfuse
diff --git a/backend/start.sh b/backend/start.sh
index bb106ff4..ca0e9688 100755
--- a/backend/start.sh
+++ b/backend/start.sh
@@ -28,9 +28,4 @@ if [ "$INCLUDE_OLLAMA" = "true" ]; then
     ollama serve &
 fi
 
-if [ "$USE_CUDA_DOCKER" = "true" ]; then
-  echo "CUDA is enabled, appending LD_LIBRARY_PATH to include torch/cudnn & cublas libraries."
-  export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
-fi
-
 WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host 0.0.0.0 --port "$PORT" --forwarded-allow-ips '*'
diff --git a/backend/utils/webhook.py b/backend/utils/webhook.py
deleted file mode 100644
index b6692e53..00000000
--- a/backend/utils/webhook.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import json
-import requests
-import logging
-
-from config import SRC_LOG_LEVELS, VERSION, WEBUI_FAVICON_URL, WEBUI_NAME
-
-log = logging.getLogger(__name__)
-log.setLevel(SRC_LOG_LEVELS["WEBHOOK"])
-
-
-def post_webhook(url: str, message: str, event_data: dict) -> bool:
-    try:
-        payload = {}
-
-        # Slack and Google Chat Webhooks
-        if "https://hooks.slack.com" in url or "https://chat.googleapis.com" in url:
-            payload["text"] = message
-        # Discord Webhooks
-        elif "https://discord.com/api/webhooks" in url:
-            payload["content"] = message
-        # Microsoft Teams Webhooks
-        elif "webhook.office.com" in url:
-            action = event_data.get("action", "undefined")
-            facts = [
-                {"name": name, "value": value}
-                for name, value in json.loads(event_data.get("user", {})).items()
-            ]
-            payload = {
-                "@type": "MessageCard",
-                "@context": "http://schema.org/extensions",
-                "themeColor": "0076D7",
-                "summary": message,
-                "sections": [
-                    {
-                        "activityTitle": message,
-                        "activitySubtitle": f"{WEBUI_NAME} ({VERSION}) - {action}",
-                        "activityImage": WEBUI_FAVICON_URL,
-                        "facts": facts,
-                        "markdown": True,
-                    }
-                ],
-            }
-        # Default Payload
-        else:
-            payload = {**event_data}
-
-        log.debug(f"payload: {payload}")
-        r = requests.post(url, json=payload)
-        r.raise_for_status()
-        log.debug(f"r.text: {r.text}")
-        return True
-    except Exception as e:
-        log.exception(e)
-        return False
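The deleted utils/webhook.py chose its payload key by inspecting the webhook URL: Slack and Google Chat expect "text", Discord expects "content", and anything unrecognised receives the raw event data. A condensed sketch of that dispatch, useful if the feature is reinstated later; the URL checks are shortened and the Microsoft Teams MessageCard branch is omitted:

import requests

def post_webhook(url: str, message: str, event_data: dict) -> bool:
    if "hooks.slack.com" in url or "chat.googleapis.com" in url:
        payload = {"text": message}        # Slack / Google Chat
    elif "discord.com/api/webhooks" in url:
        payload = {"content": message}     # Discord
    else:
        payload = {**event_data}           # generic JSON fallback
    try:
        r = requests.post(url, json=payload)
        r.raise_for_status()
        return True
    except Exception:
        return False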
diff --git a/demo.gif b/demo.gif
index 5a3f36269dee54447f1567968c00e3bdb3adf2f0..4c2fbd539257767af922bef007963d12abdb198c 100644
GIT binary patch
delta 5923218
[base85-encoded binary delta omitted; the source patch is truncated partway through this data]