diff --git a/backend/apps/audio/main.py b/backend/apps/audio/main.py index 02d1f5e8..f93b50f6 100644 --- a/backend/apps/audio/main.py +++ b/backend/apps/audio/main.py @@ -28,6 +28,7 @@ from config import ( UPLOAD_DIR, WHISPER_MODEL, WHISPER_MODEL_DIR, + WHISPER_MODEL_AUTO_UPDATE, DEVICE_TYPE, ) @@ -69,12 +70,24 @@ def transcribe( f.write(contents) f.close() - model = WhisperModel( - WHISPER_MODEL, - device=whisper_device_type, - compute_type="int8", - download_root=WHISPER_MODEL_DIR, - ) + whisper_kwargs = { + "model_size_or_path": WHISPER_MODEL, + "device": whisper_device_type, + "compute_type": "int8", + "download_root": WHISPER_MODEL_DIR, + "local_files_only": not WHISPER_MODEL_AUTO_UPDATE, + } + + log.debug(f"whisper_kwargs: {whisper_kwargs}") + + try: + model = WhisperModel(**whisper_kwargs) + except Exception: + log.warning( + "WhisperModel initialization failed, attempting download with local_files_only=False" + ) + whisper_kwargs["local_files_only"] = False + model = WhisperModel(**whisper_kwargs) segments, info = model.transcribe(file_path, beam_size=5) log.info( diff --git a/backend/apps/images/main.py b/backend/apps/images/main.py index af8cbf7c..f39984de 100644 --- a/backend/apps/images/main.py +++ b/backend/apps/images/main.py @@ -29,7 +29,13 @@ import base64 import json import logging -from config import SRC_LOG_LEVELS, CACHE_DIR, AUTOMATIC1111_BASE_URL, COMFYUI_BASE_URL +from config import ( + SRC_LOG_LEVELS, + CACHE_DIR, + ENABLE_IMAGE_GENERATION, + AUTOMATIC1111_BASE_URL, + COMFYUI_BASE_URL, +) log = logging.getLogger(__name__) @@ -48,7 +54,7 @@ app.add_middleware( ) app.state.ENGINE = "" -app.state.ENABLED = False +app.state.ENABLED = ENABLE_IMAGE_GENERATION app.state.OPENAI_API_KEY = "" app.state.MODEL = "" diff --git a/backend/apps/ollama/main.py b/backend/apps/ollama/main.py index 5e19a8e3..7140cad9 100644 --- a/backend/apps/ollama/main.py +++ b/backend/apps/ollama/main.py @@ -612,8 +612,13 @@ async def generate_embeddings(
user=Depends(get_current_user), ): if url_idx == None: - if form_data.model in app.state.MODELS: - url_idx = random.choice(app.state.MODELS[form_data.model]["urls"]) + model = form_data.model + + if ":" not in model: + model = f"{model}:latest" + + if model in app.state.MODELS: + url_idx = random.choice(app.state.MODELS[model]["urls"]) else: raise HTTPException( status_code=400, @@ -672,8 +677,13 @@ async def generate_completion( ): if url_idx == None: - if form_data.model in app.state.MODELS: - url_idx = random.choice(app.state.MODELS[form_data.model]["urls"]) + model = form_data.model + + if ":" not in model: + model = f"{model}:latest" + + if model in app.state.MODELS: + url_idx = random.choice(app.state.MODELS[model]["urls"]) else: raise HTTPException( status_code=400, @@ -770,8 +780,13 @@ async def generate_chat_completion( ): if url_idx == None: - if form_data.model in app.state.MODELS: - url_idx = random.choice(app.state.MODELS[form_data.model]["urls"]) + model = form_data.model + + if ":" not in model: + model = f"{model}:latest" + + if model in app.state.MODELS: + url_idx = random.choice(app.state.MODELS[model]["urls"]) else: raise HTTPException( status_code=400, @@ -874,8 +889,13 @@ async def generate_openai_chat_completion( ): if url_idx == None: - if form_data.model in app.state.MODELS: - url_idx = random.choice(app.state.MODELS[form_data.model]["urls"]) + model = form_data.model + + if ":" not in model: + model = f"{model}:latest" + + if model in app.state.MODELS: + url_idx = random.choice(app.state.MODELS[model]["urls"]) else: raise HTTPException( status_code=400, diff --git a/backend/config.py b/backend/config.py index cb4aca75..6d93115b 100644 --- a/backend/config.py +++ b/backend/config.py @@ -413,7 +413,7 @@ RAG_EMBEDDING_MODEL_AUTO_UPDATE = ( ) -# device type embbeding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance +# device type embedding models - "cpu" 
(default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false") if USE_CUDA.lower() == "true": @@ -450,11 +450,17 @@ Query: [query]""" WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base") WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models") +WHISPER_MODEL_AUTO_UPDATE = ( + os.environ.get("WHISPER_MODEL_AUTO_UPDATE", "").lower() == "true" +) #################################### # Images #################################### +ENABLE_IMAGE_GENERATION = ( + os.environ.get("ENABLE_IMAGE_GENERATION", "").lower() == "true" +) AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "") COMFYUI_BASE_URL = os.getenv("COMFYUI_BASE_URL", "") diff --git a/src/app.html b/src/app.html index f731761c..2d1ef0d1 100644 --- a/src/app.html +++ b/src/app.html @@ -3,7 +3,7 @@ - + + +
+ +
+ + + +
+ {#each pages as page (page.key)} + {#if page.type === 'ellipsis'} +
...
+ {:else} + + {page.value} + + {/if} + {/each} +
+ + + +
+
+
diff --git a/src/lib/components/icons/ChevronLeft.svelte b/src/lib/components/icons/ChevronLeft.svelte new file mode 100644 index 00000000..78ee64d2 --- /dev/null +++ b/src/lib/components/icons/ChevronLeft.svelte @@ -0,0 +1,15 @@ + + + + + diff --git a/src/lib/components/icons/ChevronRight.svelte b/src/lib/components/icons/ChevronRight.svelte new file mode 100644 index 00000000..7daf4a14 --- /dev/null +++ b/src/lib/components/icons/ChevronRight.svelte @@ -0,0 +1,15 @@ + + + + + diff --git a/src/lib/components/layout/Sidebar/ChatMenu.svelte b/src/lib/components/layout/Sidebar/ChatMenu.svelte index 36ac1457..b5a0a86a 100644 --- a/src/lib/components/layout/Sidebar/ChatMenu.svelte +++ b/src/lib/components/layout/Sidebar/ChatMenu.svelte @@ -26,14 +26,14 @@
{ renameHandler(); }} @@ -43,7 +43,7 @@ { deleteHandler(); }} diff --git a/src/lib/i18n/locales/vi-VN/translation.json b/src/lib/i18n/locales/vi-VN/translation.json index 1ef7fef8..36ad86ab 100644 --- a/src/lib/i18n/locales/vi-VN/translation.json +++ b/src/lib/i18n/locales/vi-VN/translation.json @@ -55,9 +55,9 @@ "Check for updates": "Kiểm tra cập nhật", "Checking for updates...": "Đang kiểm tra cập nhật...", "Choose a model before saving...": "Chọn mô hình trước khi lưu...", - "Chunk Overlap": "Kích thước chồng lấn (overlap)", + "Chunk Overlap": "Chồng lấn (overlap)", "Chunk Params": "Cài đặt số lượng ký tự cho khối ký tự (chunk)", - "Chunk Size": "Kích thức khối (size)", + "Chunk Size": "Kích thước khối (size)", "Click here for help.": "Bấm vào đây để được trợ giúp.", "Click here to check other modelfiles.": "Bấm vào đây để kiểm tra các tệp mô tả mô hình (modelfiles) khác.", "Click here to select": "Bấm vào đây để chọn", @@ -65,7 +65,7 @@ "click here.": "bấm vào đây.", "Click on the user role button to change a user's role.": "Bấm vào nút trong cột VAI TRÒ để thay đổi quyền của người sử dụng.", "Close": "Đóng", - "Collection": "Bộ sưu tập", + "Collection": "Tổng hợp mọi tài liệu", "Command": "Lệnh", "Confirm Password": "Xác nhận Mật khẩu", "Connections": "Kết nối", @@ -76,7 +76,7 @@ "Copy last response": "Sao chép phản hồi cuối cùng", "Copying to clipboard was successful!": "Sao chép vào clipboard thành công!", "Create a concise, 3-5 word phrase as a header for the following query, strictly adhering to the 3-5 word limit and avoiding the use of the word 'title':": "Tạo một cụm từ súc tích, 3-5 từ làm tiêu đề cho truy vấn sau, tuân thủ nghiêm ngặt giới hạn 3-5 từ và tránh sử dụng từ 'tiêu đề':", - "Create a modelfile": "Tạo tệp mô tả mô hình", + "Create a modelfile": "Tạo tệp mô tả cho mô hình", "Create Account": "Tạo Tài khoản", "Created at": "Được tạo vào lúc", "Created by": "Được tạo bởi", @@ -347,7 +347,7 @@ "Valid time units:": "Đơn vị thời gian hợp lệ:", 
"variable": "biến", "variable to have them replaced with clipboard content.": "biến để có chúng được thay thế bằng nội dung clipboard.", - "Version": "Phiên bản", + "Version": "Version", "Web": "Web", "WebUI Add-ons": "Tiện ích WebUI", "WebUI Settings": "Cài đặt WebUI", diff --git a/src/lib/utils/index.ts b/src/lib/utils/index.ts index 3ea82783..132c57d8 100644 --- a/src/lib/utils/index.ts +++ b/src/lib/utils/index.ts @@ -493,4 +493,26 @@ export const templatePrompt = (template: string, prompt: string) => { } return template; }; + +export const approximateToHumanReadable = (nanoseconds: number) => { + const seconds = Math.floor((nanoseconds / 1e9) % 60); + const minutes = Math.floor((nanoseconds / 6e10) % 60); + const hours = Math.floor((nanoseconds / 3.6e12) % 24); + + const results: string[] = []; + + if (seconds >= 0) { + results.push(`${seconds}s`); + } + + if (minutes > 0) { + results.push(`${minutes}m`); + } + + if (hours > 0) { + results.push(`${hours}h`); + } + + return results.reverse().join(' '); +}; diff --git a/src/routes/(app)/+layout.svelte b/src/routes/(app)/+layout.svelte index ff3ecb76..2000d87c 100644 --- a/src/routes/(app)/+layout.svelte +++ b/src/routes/(app)/+layout.svelte @@ -106,11 +106,6 @@ // IndexedDB Not Found } - console.log(); - - await models.set(await getModels()); - await tick(); - await settings.set(JSON.parse(localStorage.getItem('settings') ??
'{}')); await modelfiles.set(await getModelfiles(localStorage.token)); diff --git a/src/routes/(app)/admin/+page.svelte b/src/routes/(app)/admin/+page.svelte index a90b3262..a3493cb6 100644 --- a/src/routes/(app)/admin/+page.svelte +++ b/src/routes/(app)/admin/+page.svelte @@ -12,6 +12,7 @@ import { getSignUpEnabledStatus, toggleSignUpEnabledStatus } from '$lib/apis/auths'; import EditUserModal from '$lib/components/admin/EditUserModal.svelte'; import SettingsModal from '$lib/components/admin/SettingsModal.svelte'; + import Pagination from '$lib/components/common/Pagination.svelte'; const i18n = getContext('i18n'); @@ -21,6 +22,8 @@ let search = ''; let selectedUser = null; + let page = 1; + let showSettingsModal = false; let showEditUserModal = false; @@ -159,15 +162,17 @@ - {#each users.filter((user) => { - if (search === '') { - return true; - } else { - let name = user.name.toLowerCase(); - const query = search.toLowerCase(); - return name.includes(query); - } - }) as user} + {#each users + .filter((user) => { + if (search === '') { + return true; + } else { + let name = user.name.toLowerCase(); + const query = search.toLowerCase(); + return name.includes(query); + } + }) + .slice((page - 1) * 20, page * 20) as user}
+ +