Merge pull request #1419 from lainedfles/embedding-model-fix-and-manual-update

feat: improve embedding model update & resolve network dependency
Timothy Jaeryang Baek 2024-04-10 01:10:07 -07:00 committed by GitHub
commit b9cadff16b
6 changed files with 438 additions and 210 deletions

View file

@@ -13,7 +13,6 @@ import os, shutil, logging, re
from pathlib import Path
from typing import List
from sentence_transformers import SentenceTransformer
from chromadb.utils import embedding_functions
from chromadb.utils.batch_utils import create_batches
@@ -46,7 +45,7 @@ from apps.web.models.documents import (
DocumentResponse,
)
from apps.rag.utils import query_doc, query_collection
from apps.rag.utils import query_doc, query_collection, get_embedding_model_path
from utils.misc import (
calculate_sha256,
@@ -60,6 +59,7 @@ from config import (
UPLOAD_DIR,
DOCS_DIR,
RAG_EMBEDDING_MODEL,
RAG_EMBEDDING_MODEL_AUTO_UPDATE,
DEVICE_TYPE,
CHROMA_CLIENT,
CHUNK_SIZE,
@@ -78,12 +78,18 @@ app.state.PDF_EXTRACT_IMAGES = False
app.state.CHUNK_SIZE = CHUNK_SIZE
app.state.CHUNK_OVERLAP = CHUNK_OVERLAP
app.state.RAG_TEMPLATE = RAG_TEMPLATE
app.state.RAG_EMBEDDING_MODEL = RAG_EMBEDDING_MODEL
app.state.TOP_K = 4
app.state.sentence_transformer_ef = (
embedding_functions.SentenceTransformerEmbeddingFunction(
model_name=app.state.RAG_EMBEDDING_MODEL,
model_name=get_embedding_model_path(
app.state.RAG_EMBEDDING_MODEL, RAG_EMBEDDING_MODEL_AUTO_UPDATE
),
device=DEVICE_TYPE,
)
)
@@ -135,18 +141,34 @@ class EmbeddingModelUpdateForm(BaseModel):
async def update_embedding_model(
form_data: EmbeddingModelUpdateForm, user=Depends(get_admin_user)
):
app.state.RAG_EMBEDDING_MODEL = form_data.embedding_model
app.state.sentence_transformer_ef = (
log.info(
f"Updating embedding model: {app.state.RAG_EMBEDDING_MODEL} to {form_data.embedding_model}"
)
try:
sentence_transformer_ef = (
embedding_functions.SentenceTransformerEmbeddingFunction(
model_name=app.state.RAG_EMBEDDING_MODEL,
model_name=get_embedding_model_path(form_data.embedding_model, True),
device=DEVICE_TYPE,
)
)
app.state.RAG_EMBEDDING_MODEL = form_data.embedding_model
app.state.sentence_transformer_ef = sentence_transformer_ef
return {
"status": True,
"embedding_model": app.state.RAG_EMBEDDING_MODEL,
}
except Exception as e:
log.exception(f"Problem updating embedding model: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ERROR_MESSAGES.DEFAULT(e),
)
@app.get("/config")
async def get_rag_config(user=Depends(get_admin_user)):
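
The updated route above no longer swaps the model in place: the new SentenceTransformer embedding function is built first, and app.state is only touched once the snapshot path has been resolved, so a failed download no longer leaves the server pointing at a model it cannot load. Purely as a hedged client-side sketch (the /rag/api/v1 mount point, the localhost URL, and the use of requests are assumptions, not part of the diff; the admin UI further below calls the same route through updateEmbeddingModel):

# Hedged sketch of calling the new update endpoint outside the web UI.
# Assumptions: the RAG router is mounted at /rag/api/v1 and `token` is an admin JWT.
import requests

BASE_URL = "http://localhost:8080/rag/api/v1"  # assumed mount point

def set_embedding_model(token: str, model: str) -> dict:
    # Request body mirrors EmbeddingModelUpdateForm: {"embedding_model": "<short name>"}
    r = requests.post(
        f"{BASE_URL}/embedding/model/update",
        json={"embedding_model": model},
        headers={"Authorization": f"Bearer {token}"},
    )
    r.raise_for_status()  # the route answers HTTP 500 if the snapshot download fails
    return r.json()       # e.g. {"status": True, "embedding_model": "all-MiniLM-L6-v2"}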

View file

@@ -1,6 +1,8 @@
import os
import re
import logging
from typing import List
from huggingface_hub import snapshot_download
from config import SRC_LOG_LEVELS, CHROMA_CLIENT
@@ -188,3 +190,43 @@ def rag_messages(docs, messages, template, k, embedding_function):
messages[last_user_message_idx] = new_user_message
return messages
def get_embedding_model_path(
embedding_model: str, update_embedding_model: bool = False
):
# Construct huggingface_hub kwargs with local_files_only to return the snapshot path
cache_dir = os.getenv("SENTENCE_TRANSFORMERS_HOME")
local_files_only = not update_embedding_model
snapshot_kwargs = {
"cache_dir": cache_dir,
"local_files_only": local_files_only,
}
log.debug(f"embedding_model: {embedding_model}")
log.debug(f"snapshot_kwargs: {snapshot_kwargs}")
# Inspiration from upstream sentence_transformers
if (
os.path.exists(embedding_model)
or ("\\" in embedding_model or embedding_model.count("/") > 1)
and local_files_only
):
# If fully qualified path exists, return input, else set repo_id
return embedding_model
elif "/" not in embedding_model:
# Set valid repo_id for model short-name
embedding_model = "sentence-transformers" + "/" + embedding_model
snapshot_kwargs["repo_id"] = embedding_model
# Attempt to query the huggingface_hub library to determine the local path and/or to update
try:
embedding_model_repo_path = snapshot_download(**snapshot_kwargs)
log.debug(f"embedding_model_repo_path: {embedding_model_repo_path}")
return embedding_model_repo_path
except Exception as e:
log.exception(f"Cannot determine embedding model snapshot path: {e}")
return embedding_model
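
In short, get_embedding_model_path() maps a model short name (e.g. all-MiniLM-L6-v2) onto the sentence-transformers/<name> repo and asks huggingface_hub for its local snapshot path; the network is only consulted when an update is requested, otherwise local_files_only keeps resolution offline. A hedged usage sketch with illustrative values only:

# Offline resolution: if the snapshot is not cached, snapshot_download() raises and
# the helper falls back to returning the raw model string.
path = get_embedding_model_path("all-MiniLM-L6-v2", update_embedding_model=False)

# Update path: snapshot_download() may download or refresh the cache (honouring
# SENTENCE_TRANSFORMERS_HOME) before the path is handed to SentenceTransformerEmbeddingFunction.
path = get_embedding_model_path("all-MiniLM-L6-v2", update_embedding_model=True)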

View file

@@ -403,6 +403,12 @@ CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
# this uses the model defined in the Dockerfile ENV variable. If you don't use Docker or Docker-based deployments such as k8s, the default embedding model (all-MiniLM-L6-v2) will be used
RAG_EMBEDDING_MODEL = os.environ.get("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
log.info(f"Embedding model set: {RAG_EMBEDDING_MODEL}"),
RAG_EMBEDDING_MODEL_AUTO_UPDATE = (
os.environ.get("RAG_EMBEDDING_MODEL_AUTO_UPDATE", "").lower() == "true"
)
# device type for embedding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing the right one can lead to better performance
USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")
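
The RAG_EMBEDDING_MODEL_AUTO_UPDATE flag added above is truthy only for the literal string "true" (case-insensitive) and decides whether startup may download the configured model. A minimal sketch, assuming the variables are set before config.py is imported (in practice via Docker/compose or a .env file rather than in code):

import os

# Illustrative values only; anything other than "true" keeps model resolution offline.
os.environ["RAG_EMBEDDING_MODEL"] = "all-MiniLM-L6-v2"
os.environ["RAG_EMBEDDING_MODEL_AUTO_UPDATE"] = "true"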

View file

@@ -345,3 +345,64 @@ export const resetVectorDB = async (token: string) => {
return res;
};
export const getEmbeddingModel = async (token: string) => {
let error = null;
const res = await fetch(`${RAG_API_BASE_URL}/embedding/model`, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`
}
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = err.detail;
return null;
});
if (error) {
throw error;
}
return res;
};
type EmbeddingModelUpdateForm = {
embedding_model: string;
};
export const updateEmbeddingModel = async (token: string, payload: EmbeddingModelUpdateForm) => {
let error = null;
const res = await fetch(`${RAG_API_BASE_URL}/embedding/model/update`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`
},
body: JSON.stringify({
...payload
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
return res.json();
})
.catch((err) => {
console.log(err);
error = err.detail;
return null;
});
if (error) {
throw error;
}
return res;
};

View file

@@ -6,18 +6,23 @@
getQuerySettings,
scanDocs,
updateQuerySettings,
resetVectorDB
resetVectorDB,
getEmbeddingModel,
updateEmbeddingModel
} from '$lib/apis/rag';
import { documents } from '$lib/stores';
import { onMount, getContext } from 'svelte';
import { toast } from 'svelte-sonner';
import Tooltip from '$lib/components/common/Tooltip.svelte';
const i18n = getContext('i18n');
export let saveHandler: Function;
let loading = false;
let scanDirLoading = false;
let updateEmbeddingModelLoading = false;
let showResetConfirm = false;
@@ -30,10 +35,12 @@
k: 4
};
let embeddingModel = '';
const scanHandler = async () => {
loading = true;
scanDirLoading = true;
const res = await scanDocs(localStorage.token);
loading = false;
scanDirLoading = false;
if (res) {
await documents.set(await getDocs(localStorage.token));
@@ -41,6 +48,38 @@
}
};
const embeddingModelUpdateHandler = async () => {
if (embeddingModel.split('/').length - 1 > 1) {
toast.error(
$i18n.t(
'Model filesystem path detected. Model shortname is required for update, cannot continue.'
)
);
return;
}
console.log('Update embedding model attempt:', embeddingModel);
updateEmbeddingModelLoading = true;
const res = await updateEmbeddingModel(localStorage.token, {
embedding_model: embeddingModel
}).catch(async (error) => {
toast.error(error);
embeddingModel = (await getEmbeddingModel(localStorage.token)).embedding_model;
return null;
});
updateEmbeddingModelLoading = false;
if (res) {
console.log('embeddingModelUpdateHandler:', res);
if (res.status === true) {
toast.success($i18n.t('Model {{embedding_model}} update complete!', res), {
duration: 1000 * 10
});
}
}
};
const submitHandler = async () => {
const res = await updateRAGConfig(localStorage.token, {
pdf_extract_images: pdfExtractImages,
@@ -62,6 +101,8 @@
chunkOverlap = res.chunk.chunk_overlap;
}
embeddingModel = (await getEmbeddingModel(localStorage.token)).embedding_model;
querySettings = await getQuerySettings(localStorage.token);
});
</script>
@@ -73,7 +114,7 @@
saveHandler();
}}
>
<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-80">
<div class=" space-y-3 pr-1.5 overflow-y-scroll max-h-[22rem]">
<div>
<div class=" mb-2 text-sm font-medium">{$i18n.t('General Settings')}</div>
@@ -83,7 +124,7 @@
</div>
<button
class=" self-center text-xs p-1 px-3 bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 rounded flex flex-row space-x-1 items-center {loading
class=" self-center text-xs p-1 px-3 bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 rounded-lg flex flex-row space-x-1 items-center {scanDirLoading
? ' cursor-not-allowed'
: ''}"
on:click={() => {
@@ -91,24 +132,11 @@
console.log('check');
}}
type="button"
disabled={loading}
disabled={scanDirLoading}
>
<div class="self-center font-medium">{$i18n.t('Scan')}</div>
<!-- <svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-3 h-3"
>
<path
fill-rule="evenodd"
d="M13.836 2.477a.75.75 0 0 1 .75.75v3.182a.75.75 0 0 1-.75.75h-3.182a.75.75 0 0 1 0-1.5h1.37l-.84-.841a4.5 4.5 0 0 0-7.08.932.75.75 0 0 1-1.3-.75 6 6 0 0 1 9.44-1.242l.842.84V3.227a.75.75 0 0 1 .75-.75Zm-.911 7.5A.75.75 0 0 1 13.199 11a6 6 0 0 1-9.44 1.241l-.84-.84v1.371a.75.75 0 0 1-1.5 0V9.591a.75.75 0 0 1 .75-.75H5.35a.75.75 0 0 1 0 1.5H3.98l.841.841a4.5 4.5 0 0 0 7.08-.932.75.75 0 0 1 1.025-.273Z"
clip-rule="evenodd"
/>
</svg> -->
{#if loading}
{#if scanDirLoading}
<div class="ml-3 self-center">
<svg
class=" w-3 h-3"
@@ -141,6 +169,78 @@
<hr class=" dark:border-gray-700" />
<div class="space-y-2">
<div>
<div class=" mb-2 text-sm font-medium">{$i18n.t('Update Embedding Model')}</div>
<div class="flex w-full">
<div class="flex-1 mr-2">
<input
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder={$i18n.t('Update embedding model (e.g. {{model}})', {
model: embeddingModel.slice(-40)
})}
bind:value={embeddingModel}
/>
</div>
<button
class="px-2.5 bg-gray-100 hover:bg-gray-200 text-gray-800 dark:bg-gray-850 dark:hover:bg-gray-800 dark:text-gray-100 rounded-lg transition"
on:click={() => {
embeddingModelUpdateHandler();
}}
disabled={updateEmbeddingModelLoading}
>
{#if updateEmbeddingModelLoading}
<div class="self-center">
<svg
class=" w-4 h-4"
viewBox="0 0 24 24"
fill="currentColor"
xmlns="http://www.w3.org/2000/svg"
><style>
.spinner_ajPY {
transform-origin: center;
animation: spinner_AtaB 0.75s infinite linear;
}
@keyframes spinner_AtaB {
100% {
transform: rotate(360deg);
}
}
</style><path
d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
opacity=".25"
/><path
d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
class="spinner_ajPY"
/></svg
>
</div>
{:else}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M8.75 2.75a.75.75 0 0 0-1.5 0v5.69L5.03 6.22a.75.75 0 0 0-1.06 1.06l3.5 3.5a.75.75 0 0 0 1.06 0l3.5-3.5a.75.75 0 0 0-1.06-1.06L8.75 8.44V2.75Z"
/>
<path
d="M3.5 9.75a.75.75 0 0 0-1.5 0v1.5A2.75 2.75 0 0 0 4.75 14h6.5A2.75 2.75 0 0 0 14 11.25v-1.5a.75.75 0 0 0-1.5 0v1.5c0 .69-.56 1.25-1.25 1.25h-6.5c-.69 0-1.25-.56-1.25-1.25v-1.5Z"
/>
</svg>
{/if}
</button>
</div>
<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t(
'Warning: If you update or change your embedding model, you will need to re-import all documents.'
)}
</div>
<hr class=" dark:border-gray-700 my-3" />
<div class=" ">
<div class=" text-sm font-medium">{$i18n.t('Chunk Params')}</div>
@@ -150,7 +250,7 @@
<div class="self-center p-3">
<input
class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
class=" w-full rounded-lg py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Chunk Size')}
bind:value={chunkSize}
@@ -161,11 +261,13 @@
</div>
<div class="flex w-full">
<div class=" self-center text-xs font-medium min-w-fit">{$i18n.t('Chunk Overlap')}</div>
<div class=" self-center text-xs font-medium min-w-fit">
{$i18n.t('Chunk Overlap')}
</div>
<div class="self-center p-3">
<input
class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
class="w-full rounded-lg py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Chunk Overlap')}
bind:value={chunkOverlap}
@@ -176,7 +278,7 @@
</div>
</div>
<div>
<div class="pr-2">
<div class="flex justify-between items-center text-xs">
<div class=" text-xs font-medium">{$i18n.t('PDF Extract Images (OCR)')}</div>
@@ -191,6 +293,8 @@
</div>
</div>
<hr class=" dark:border-gray-700 my-3" />
<div>
<div class=" text-sm font-medium">{$i18n.t('Query Params')}</div>
@@ -200,7 +304,7 @@
<div class="self-center p-3">
<input
class=" w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
class=" w-full rounded-lg py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Top K')}
bind:value={querySettings.k}
@@ -209,34 +313,19 @@
/>
</div>
</div>
<!-- <div class="flex w-full">
<div class=" self-center text-xs font-medium min-w-fit">Chunk Overlap</div>
<div class="self-center p-3">
<input
class="w-full rounded py-1.5 px-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none border border-gray-100 dark:border-gray-600"
type="number"
placeholder="Enter Chunk Overlap"
bind:value={chunkOverlap}
autocomplete="off"
min="0"
/>
</div>
</div> -->
</div>
<div>
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('RAG Template')}</div>
<textarea
bind:value={querySettings.template}
class="w-full rounded p-4 text-sm dark:text-gray-300 dark:bg-gray-800 outline-none resize-none"
class="w-full rounded-lg px-4 py-3 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
rows="4"
/>
</div>
</div>
<hr class=" dark:border-gray-700" />
<hr class=" dark:border-gray-700 my-3" />
{#if showResetConfirm}
<div class="flex justify-between rounded-md items-center py-2 px-3.5 w-full transition">
@@ -330,7 +419,8 @@
</button>
{/if}
</div>
</div>
</div>
<div class="flex justify-end pt-3 text-sm font-medium">
<button
class=" px-4 py-2 bg-emerald-600 hover:bg-emerald-700 text-gray-100 transition rounded"

View file

@@ -120,6 +120,7 @@
"Edit Doc": "",
"Edit User": "",
"Email": "",
"Embedding model: {{embedding_model}}": "",
"Enable Chat History": "",
"Enable New Sign Ups": "",
"Enabled": "",
@@ -194,8 +195,11 @@
"MMMM DD, YYYY": "",
"Model '{{modelName}}' has been successfully downloaded.": "",
"Model '{{modelTag}}' is already in queue for downloading.": "",
"Model {{embedding_model}} update complete!": "",
"Model {{embedding_model}} update failed or not required!": "",
"Model {{modelId}} not found": "",
"Model {{modelName}} already exists.": "",
"Model filesystem path detected. Model shortname is required for update, cannot continue.": "",
"Model Name": "",
"Model not selected": "",
"Model Tag Name": "",
@@ -333,7 +337,10 @@
"TTS Settings": "",
"Type Hugging Face Resolve (Download) URL": "",
"Uh-oh! There was an issue connecting to {{provider}}.": "",
"Understand that updating or changing your embedding model requires reset of the vector database and re-import of all documents. You have been warned!": "",
"Unknown File Type '{{file_type}}', but accepting and treating as plain text": "",
"Update": "",
"Update embedding model {{embedding_model}}": "",
"Update password": "",
"Upload a GGUF model": "",
"Upload files": "",